/* ua_nodestore_concurrent.c — concurrent nodestore built on userspace-RCU
   (liburcu): a lock-free hash table with refcounted, RCU-reclaimed entries. */
  1. #include "ua_nodestore.h"
  2. #include "ua_util.h"
  3. #include <urcu.h>
  4. #include <urcu/compiler.h> // for caa_container_of
  5. #include <urcu/uatomic.h>
  6. #include <urcu/rculfhash.h>
#define ALIVE_BIT (1 << 15) /* Alive bit in the refcount */

/* An entry wraps a node for storage in the hash table. The refcount packs the
   alive-bit (bit 15, cleared when the entry is removed from the table) together
   with a 15-bit count of long-lived readers. */
typedef struct UA_NodeStore_Entry {
    struct cds_lfht_node htn; /* contains next-ptr for urcu-hashmap */
    struct rcu_head rcu_head; /* For call-rcu */
    UA_UInt16 refcount; /* Counts the amount of readers on it [alive-bit, 15 counter-bits] */
    const UA_Node node; /* Might be cast from any _bigger_ UA_Node* type. Allocate enough memory! */
} UA_NodeStore_Entry;

/* The nodestore is a thin wrapper around a urcu lock-free hash table. */
struct UA_NodeStore {
    struct cds_lfht *ht; /* Hash table */
};
  17. #include "ua_nodestore_hash.inc"
  18. static inline void node_deleteMembers(const UA_Node *node) {
  19. switch(node->nodeClass) {
  20. case UA_NODECLASS_OBJECT:
  21. UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
  22. break;
  23. case UA_NODECLASS_VARIABLE:
  24. UA_VariableNode_deleteMembers((UA_VariableNode *)node);
  25. break;
  26. case UA_NODECLASS_METHOD:
  27. UA_MethodNode_deleteMembers((UA_MethodNode *)node);
  28. break;
  29. case UA_NODECLASS_OBJECTTYPE:
  30. UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
  31. break;
  32. case UA_NODECLASS_VARIABLETYPE:
  33. UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
  34. break;
  35. case UA_NODECLASS_REFERENCETYPE:
  36. UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
  37. break;
  38. case UA_NODECLASS_DATATYPE:
  39. UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
  40. break;
  41. case UA_NODECLASS_VIEW:
  42. UA_ViewNode_deleteMembers((UA_ViewNode *)node);
  43. break;
  44. default:
  45. UA_assert(UA_FALSE);
  46. break;
  47. }
  48. }
  49. /* We are in a rcu_read lock. So the node will not be freed under our feet. */
  50. static int compare(struct cds_lfht_node *htn, const void *orig) {
  51. const UA_NodeId *origid = (const UA_NodeId *)orig;
  52. const UA_NodeId *newid = &((UA_NodeStore_Entry *)htn)->node.nodeId; /* The htn is first in the entry structure. */
  53. return UA_NodeId_equal(newid, origid);
  54. }
  55. /* The entry was removed from the hashtable. No more readers can get it. Since
  56. all readers using the node for a longer time (outside the rcu critical
  57. section) increased the refcount, we only need to wait for the refcount
  58. to reach zero. */
  59. static void markDead(struct rcu_head *head) {
  60. UA_NodeStore_Entry *entry = caa_container_of(head, UA_NodeStore_Entry, rcu_head);
  61. uatomic_and(&entry->refcount, ~ALIVE_BIT); // set the alive bit to zero
  62. if(uatomic_read(&entry->refcount) > 0)
  63. return;
  64. node_deleteMembers(&entry->node);
  65. UA_free(entry);
  66. return;
  67. }
  68. /* Free the entry if it is dead and nobody uses it anymore */
  69. void UA_NodeStore_release(const UA_Node *managed) {
  70. UA_NodeStore_Entry *entry = caa_container_of(managed, UA_NodeStore_Entry, node); // pointer to the first entry
  71. if(uatomic_sub_return(&entry->refcount, 1) > 0)
  72. return;
  73. node_deleteMembers(managed);
  74. UA_free(entry);
  75. return;
  76. }
  77. UA_NodeStore * UA_NodeStore_new() {
  78. UA_NodeStore *ns;
  79. if(!(ns = UA_alloc(sizeof(UA_NodeStore))))
  80. return UA_NULL;
  81. /* 32 is the minimum size for the hashtable. */
  82. ns->ht = cds_lfht_new(32, 32, 0, CDS_LFHT_AUTO_RESIZE, NULL);
  83. if(!ns->ht) {
  84. UA_free(ns);
  85. return UA_NULL;
  86. }
  87. return ns;
  88. }
/* Tear down the nodestore: remove every entry from the hash table and hand it
   to call_rcu, so readers still inside a grace period remain safe, then
   destroy the table and free the store. */
void UA_NodeStore_delete(UA_NodeStore *ns) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    struct cds_lfht_node *found_htn;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != UA_NULL) {
        found_htn = cds_lfht_iter_get_node(&iter);
        /* cds_lfht_del returns 0 only for the one thread that removed the
           node, so each entry is scheduled for reclamation exactly once. */
        if(!cds_lfht_del(ht, found_htn)) {
            UA_NodeStore_Entry *entry = caa_container_of(found_htn, UA_NodeStore_Entry, htn);
            call_rcu(&entry->rcu_head, markDead);
        }
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
    /* NOTE(review): per urcu documentation cds_lfht_destroy must not run from
       a call_rcu worker thread — confirm the caller context satisfies this. */
    cds_lfht_destroy(ht, UA_NULL);
    UA_free(ns);
}
/* Insert a node into the nodestore. The node is shallow-copied into a managed
   entry and the original struct is freed — *node must not be used afterwards.
   If the node carries a null id, a fresh numeric id in namespace 1 is
   generated. With getManaged == UA_TRUE, *node is set to the managed copy
   (its refcount already raised for the caller); otherwise *node is set to
   UA_NULL. Returns BADNODEIDEXISTS when an explicit id is already stored. */
UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, const UA_Node **node, UA_Boolean getManaged) {
    UA_UInt32 nodesize;
    /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
    switch((*node)->nodeClass) {
    case UA_NODECLASS_OBJECT:
        nodesize = sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        nodesize = sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        nodesize = sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        nodesize = sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        nodesize = sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        nodesize = sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        nodesize = sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        nodesize = sizeof(UA_ViewNode);
        break;
    default:
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    UA_NodeStore_Entry *entry;
    /* Over-allocate so the trailing UA_Node member can hold the concrete
       (bigger) node subtype. */
    if(!(entry = UA_alloc(sizeof(UA_NodeStore_Entry) - sizeof(UA_Node) + nodesize)))
        return UA_STATUSCODE_BADOUTOFMEMORY;
    memcpy((void*)&entry->node, *node, nodesize); /* shallow copy: members are now owned by the entry */
    cds_lfht_node_init(&entry->htn);
    entry->refcount = ALIVE_BIT;
    if(getManaged) // increase the counter before adding the node
        entry->refcount++;
    struct cds_lfht_node *result;
    if(!UA_NodeId_isNull(&(*node)->nodeId)) {
        hash_t h = hash(&(*node)->nodeId);
        rcu_read_lock();
        result = cds_lfht_add_unique(ns->ht, h, compare, &entry->node.nodeId, &entry->htn);
        rcu_read_unlock();
        /* If the nodeid exists already */
        if(result != &entry->htn) {
            UA_free(entry);
            return UA_STATUSCODE_BADNODEIDEXISTS;
        }
    } else {
        /* create a unique nodeid */
        ((UA_Node *)&entry->node)->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
        ((UA_Node *)&entry->node)->nodeId.namespaceIndex = 1; // namespace 1 is always in the local nodestore
        unsigned long identifier;
        long before, after;
        rcu_read_lock();
        cds_lfht_count_nodes(ns->ht, &before, &identifier, &after); // current amount of nodes stored
        rcu_read_unlock();
        identifier++;
        /* NOTE(review): identifier is unsigned long while numeric ids appear
           to be 32 bit — the assignment truncates on LP64 platforms; confirm
           this is acceptable. */
        ((UA_Node *)&entry->node)->nodeId.identifier.numeric = identifier;
        while(UA_TRUE) {
            hash_t nhash = hash(&entry->node.nodeId);
            rcu_read_lock();
            result = cds_lfht_add_unique(ns->ht, nhash, compare, &entry->node.nodeId, &entry->htn);
            rcu_read_unlock();
            if(result == &entry->htn)
                break;
            /* Collision: jump to another candidate id (multiplicative hashing
               constant) and retry. */
            ((UA_Node *)&entry->node)->nodeId.identifier.numeric += (identifier * 2654435761);
        }
    }
    UA_free((UA_Node *)*node); /* The old node is replaced by a managed node. */
    if(getManaged)
        *node = &entry->node;
    else
        *node = UA_NULL;
    return UA_STATUSCODE_GOOD;
}
  185. UA_StatusCode UA_NodeStore_replace(UA_NodeStore *ns, const UA_Node **node, UA_Boolean getManaged) {
  186. UA_UInt32 nodesize;
  187. /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
  188. switch((*node)->nodeClass) {
  189. case UA_NODECLASS_OBJECT:
  190. nodesize = sizeof(UA_ObjectNode);
  191. break;
  192. case UA_NODECLASS_VARIABLE:
  193. nodesize = sizeof(UA_VariableNode);
  194. break;
  195. case UA_NODECLASS_METHOD:
  196. nodesize = sizeof(UA_MethodNode);
  197. break;
  198. case UA_NODECLASS_OBJECTTYPE:
  199. nodesize = sizeof(UA_ObjectTypeNode);
  200. break;
  201. case UA_NODECLASS_VARIABLETYPE:
  202. nodesize = sizeof(UA_VariableTypeNode);
  203. break;
  204. case UA_NODECLASS_REFERENCETYPE:
  205. nodesize = sizeof(UA_ReferenceTypeNode);
  206. break;
  207. case UA_NODECLASS_DATATYPE:
  208. nodesize = sizeof(UA_DataTypeNode);
  209. break;
  210. case UA_NODECLASS_VIEW:
  211. nodesize = sizeof(UA_ViewNode);
  212. break;
  213. default:
  214. return UA_STATUSCODE_BADINTERNALERROR;
  215. }
  216. UA_NodeStore_Entry *entry;
  217. if(!(entry = UA_alloc(sizeof(UA_NodeStore_Entry) - sizeof(UA_Node) + nodesize)))
  218. return UA_STATUSCODE_BADOUTOFMEMORY;
  219. memcpy((void*)&entry->node, *node, nodesize);
  220. cds_lfht_node_init(&entry->htn);
  221. entry->refcount = ALIVE_BIT;
  222. if(getManaged) // increase the counter before adding the node
  223. entry->refcount++;
  224. hash_t h = hash(&(*node)->nodeId);
  225. struct cds_lfht_iter iter;
  226. rcu_read_lock();
  227. cds_lfht_lookup(ns->ht, h, compare, &(*node)->nodeId, &iter);
  228. struct cds_lfht_node *result = cds_lfht_iter_get_node(&iter);
  229. if(result && cds_lfht_replace(ns->ht, &iter, h, compare, &(*node)->nodeId, &entry->htn) == 0) {
  230. UA_NodeStore_Entry *entry = caa_container_of(result, UA_NodeStore_Entry, htn);
  231. /* If an entry got replaced, mark it as dead. */
  232. call_rcu(&entry->rcu_head, markDead);
  233. rcu_read_unlock();
  234. } else {
  235. rcu_read_unlock();
  236. UA_free(entry);
  237. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  238. }
  239. /* The old node is replaced by a managed node. */
  240. UA_free((UA_Node *)*node);
  241. if(getManaged)
  242. *node = &entry->node;
  243. else
  244. *node = UA_NULL;
  245. return UA_STATUSCODE_GOOD;
  246. }
  247. UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
  248. hash_t nhash = hash(nodeid);
  249. struct cds_lfht_iter iter;
  250. rcu_read_lock();
  251. cds_lfht_lookup(ns->ht, nhash, compare, &nodeid, &iter);
  252. struct cds_lfht_node *found_htn = cds_lfht_iter_get_node(&iter);
  253. /* If this fails, then the node has already been removed. */
  254. if(!found_htn || cds_lfht_del(ns->ht, found_htn) != 0) {
  255. rcu_read_unlock();
  256. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  257. }
  258. UA_NodeStore_Entry *entry = caa_container_of(found_htn, UA_NodeStore_Entry, htn);
  259. call_rcu(&entry->rcu_head, markDead);
  260. rcu_read_unlock();
  261. return UA_STATUSCODE_GOOD;
  262. }
  263. const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
  264. hash_t nhash = hash(nodeid);
  265. struct cds_lfht_iter iter;
  266. rcu_read_lock();
  267. cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
  268. UA_NodeStore_Entry *found_entry = (UA_NodeStore_Entry *)cds_lfht_iter_get_node(&iter);
  269. if(!found_entry) {
  270. rcu_read_unlock();
  271. return UA_NULL;
  272. }
  273. /* This is done within a read-lock. The node will not be marked dead within a read-lock. */
  274. uatomic_inc(&found_entry->refcount);
  275. rcu_read_unlock();
  276. return &found_entry->node;
  277. }
/* Call the visitor on every node in the store. The refcount is raised around
   each visit so the current entry cannot be freed while the visitor runs
   outside the rcu critical section. */
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != UA_NULL) {
        /* The htn is the first member of the entry, so the node pointer can be
           cast to the entry directly. */
        UA_NodeStore_Entry *found_entry = (UA_NodeStore_Entry *)cds_lfht_iter_get_node(&iter);
        uatomic_inc(&found_entry->refcount); /* pin the entry across the visitor call */
        const UA_Node *node = &found_entry->node;
        rcu_read_unlock(); /* the visitor may block; do not hold the read lock */
        visitor(node);
        UA_NodeStore_release((UA_Node *)node);
        rcu_read_lock();
        /* NOTE(review): resuming the iterator after leaving the read-side
           critical section looks unsafe — the refcount pins the current entry
           but not the successor the iterator's next step dereferences, which
           may have been reclaimed in the meantime. Confirm against urcu's
           cds_lfht iteration guarantees. */
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
}