/* ua_nodestore_concurrent.c -- concurrent nodestore based on liburcu's
   lock-free hash table (cds_lfht). Readers access nodes inside RCU
   read-side critical sections; reclamation is deferred via call_rcu. */
#include "ua_util.h"
#include "ua_nodestore.h"

/* Alive bit in the refcount: set while the entry is still reachable from the
   hash table. The low 15 bits count external references handed out by
   UA_NodeStore_get/insert/replace. */
#define ALIVE_BIT (1 << 15)

/* A node as stored in the nodestore. `node` must remain the LAST member:
   entries are allocated with exactly enough trailing memory for the concrete
   node subtype (see the sizeof arithmetic in UA_NodeStore_insert/replace). */
struct nodeEntry {
    struct cds_lfht_node htn; /* contains next-ptr for urcu-hashmap; must stay the FIRST member, compare() relies on it */
    struct rcu_head rcu_head; /* For call-rcu */
    UA_UInt16 refcount;       /* Counts the amount of readers on it [alive-bit, 15 counter-bits] */
    UA_Node node;             /* Might be cast from any _bigger_ UA_Node* type. Allocate enough memory! */
};

struct UA_NodeStore {
    struct cds_lfht *ht; /* Hash table */
};

#include "ua_nodestore_hash.inc"
  14. static void node_deleteMembers(UA_Node *node) {
  15. switch(node->nodeClass) {
  16. case UA_NODECLASS_OBJECT:
  17. UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
  18. break;
  19. case UA_NODECLASS_VARIABLE:
  20. UA_VariableNode_deleteMembers((UA_VariableNode *)node);
  21. break;
  22. case UA_NODECLASS_METHOD:
  23. UA_MethodNode_deleteMembers((UA_MethodNode *)node);
  24. break;
  25. case UA_NODECLASS_OBJECTTYPE:
  26. UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
  27. break;
  28. case UA_NODECLASS_VARIABLETYPE:
  29. UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
  30. break;
  31. case UA_NODECLASS_REFERENCETYPE:
  32. UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
  33. break;
  34. case UA_NODECLASS_DATATYPE:
  35. UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
  36. break;
  37. case UA_NODECLASS_VIEW:
  38. UA_ViewNode_deleteMembers((UA_ViewNode *)node);
  39. break;
  40. default:
  41. UA_assert(UA_FALSE);
  42. break;
  43. }
  44. }
  45. /* We are in a rcu_read lock. So the node will not be freed under our feet. */
  46. static int compare(struct cds_lfht_node *htn, const void *orig) {
  47. const UA_NodeId *origid = (const UA_NodeId *)orig;
  48. const UA_NodeId *newid = &((struct nodeEntry *)htn)->node.nodeId; /* The htn is first in the entry structure. */
  49. return UA_NodeId_equal(newid, origid);
  50. }
  51. /* The entry was removed from the hashtable. No more readers can get it. Since
  52. all readers using the node for a longer time (outside the rcu critical
  53. section) increased the refcount, we only need to wait for the refcount
  54. to reach zero. */
  55. static void markDead(struct rcu_head *head) {
  56. struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)head - offsetof(struct nodeEntry, rcu_head));
  57. uatomic_and(&entry->refcount, ~ALIVE_BIT); // set the alive bit to zero
  58. if(uatomic_read(&entry->refcount) > 0)
  59. return;
  60. node_deleteMembers(&entry->node);
  61. UA_free(entry);
  62. }
  63. /* Free the entry if it is dead and nobody uses it anymore */
  64. void UA_NodeStore_release(const UA_Node *managed) {
  65. struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)managed - offsetof(struct nodeEntry, node));
  66. if(uatomic_add_return(&entry->refcount, -1) == 0) {
  67. node_deleteMembers(&entry->node);
  68. UA_free(entry);
  69. }
  70. }
  71. UA_NodeStore * UA_NodeStore_new() {
  72. UA_NodeStore *ns;
  73. if(!(ns = UA_malloc(sizeof(UA_NodeStore))))
  74. return UA_NULL;
  75. /* 32 is the minimum size for the hashtable. */
  76. ns->ht = cds_lfht_new(32, 32, 0, CDS_LFHT_AUTO_RESIZE, NULL);
  77. if(!ns->ht) {
  78. UA_free(ns);
  79. return UA_NULL;
  80. }
  81. return ns;
  82. }
  83. void UA_NodeStore_delete(UA_NodeStore *ns) {
  84. struct cds_lfht *ht = ns->ht;
  85. struct cds_lfht_iter iter;
  86. rcu_read_lock();
  87. cds_lfht_first(ht, &iter);
  88. while(iter.node) {
  89. if(!cds_lfht_del(ht, iter.node)) {
  90. struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)iter.node - offsetof(struct nodeEntry, htn));
  91. call_rcu(&entry->rcu_head, markDead);
  92. }
  93. cds_lfht_next(ht, &iter);
  94. }
  95. rcu_read_unlock();
  96. cds_lfht_destroy(ht, UA_NULL);
  97. UA_free(ns);
  98. }
  99. UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, UA_Node *node, const UA_Node **inserted) {
  100. size_t nodesize;
  101. /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
  102. switch(node->nodeClass) {
  103. case UA_NODECLASS_OBJECT:
  104. nodesize = sizeof(UA_ObjectNode);
  105. break;
  106. case UA_NODECLASS_VARIABLE:
  107. nodesize = sizeof(UA_VariableNode);
  108. break;
  109. case UA_NODECLASS_METHOD:
  110. nodesize = sizeof(UA_MethodNode);
  111. break;
  112. case UA_NODECLASS_OBJECTTYPE:
  113. nodesize = sizeof(UA_ObjectTypeNode);
  114. break;
  115. case UA_NODECLASS_VARIABLETYPE:
  116. nodesize = sizeof(UA_VariableTypeNode);
  117. break;
  118. case UA_NODECLASS_REFERENCETYPE:
  119. nodesize = sizeof(UA_ReferenceTypeNode);
  120. break;
  121. case UA_NODECLASS_DATATYPE:
  122. nodesize = sizeof(UA_DataTypeNode);
  123. break;
  124. case UA_NODECLASS_VIEW:
  125. nodesize = sizeof(UA_ViewNode);
  126. break;
  127. default:
  128. return UA_STATUSCODE_BADINTERNALERROR;
  129. }
  130. struct nodeEntry *entry;
  131. if(!(entry = UA_malloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
  132. return UA_STATUSCODE_BADOUTOFMEMORY;
  133. UA_memcpy((void*)&entry->node, node, nodesize);
  134. cds_lfht_node_init(&entry->htn);
  135. entry->refcount = ALIVE_BIT;
  136. if(inserted) // increase the counter before adding the node
  137. entry->refcount++;
  138. struct cds_lfht_node *result;
  139. //FIXME: a bit dirty workaround of preserving namespace
  140. //namespace index is assumed to be valid
  141. UA_NodeId tempNodeid;
  142. UA_NodeId_copy(&node->nodeId, &tempNodeid);
  143. tempNodeid.namespaceIndex = 0;
  144. if(!UA_NodeId_isNull(&tempNodeid)) {
  145. hash_t h = hash(&node->nodeId);
  146. rcu_read_lock();
  147. result = cds_lfht_add_unique(ns->ht, h, compare, &entry->node.nodeId, &entry->htn);
  148. rcu_read_unlock();
  149. /* If the nodeid exists already */
  150. if(result != &entry->htn) {
  151. UA_free(entry);
  152. return UA_STATUSCODE_BADNODEIDEXISTS;
  153. }
  154. } else {
  155. /* create a unique nodeid */
  156. ((UA_Node *)&entry->node)->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
  157. if(((UA_Node *)&entry->node)->nodeId.namespaceIndex == 0) //original request for ns=0 should yield ns=1
  158. ((UA_Node *)&entry->node)->nodeId.namespaceIndex = 1;
  159. if(((UA_Node *)&entry->node)->nodeClass==UA_NODECLASS_VARIABLE){ //set namespaceIndex in browseName in case id is generated
  160. ((UA_VariableNode*)&entry->node)->browseName.namespaceIndex=((UA_Node *)&entry->node)->nodeId.namespaceIndex;
  161. }
  162. unsigned long identifier;
  163. long before, after;
  164. rcu_read_lock();
  165. cds_lfht_count_nodes(ns->ht, &before, &identifier, &after); // current amount of nodes stored
  166. identifier++;
  167. ((UA_Node *)&entry->node)->nodeId.identifier.numeric = identifier;
  168. while(UA_TRUE) {
  169. hash_t nhash = hash(&entry->node.nodeId);
  170. result = cds_lfht_add_unique(ns->ht, nhash, compare, &entry->node.nodeId, &entry->htn);
  171. if(result == &entry->htn)
  172. break;
  173. ((UA_Node *)&entry->node)->nodeId.identifier.numeric += (identifier * 2654435761);
  174. }
  175. rcu_read_unlock();
  176. }
  177. UA_free(node);
  178. if(inserted)
  179. *inserted = &entry->node;
  180. return UA_STATUSCODE_GOOD;
  181. }
/* Atomically replace `oldNode` with `node` in the nodestore. Succeeds only if
 * the stored node is still exactly `oldNode` (i.e. nobody replaced it in the
 * meantime). On success, `node`'s content is copied into a managed entry, the
 * original struct is freed, and the old entry is reclaimed after the RCU
 * grace period.
 * @param inserted If non-NULL, receives the managed replacement node with an
 *                 extra reference the caller must release. */
UA_StatusCode UA_NodeStore_replace(UA_NodeStore *ns, const UA_Node *oldNode, UA_Node *node,
                                   const UA_Node **inserted) {
    size_t nodesize;
    /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        nodesize = sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        nodesize = sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        nodesize = sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        nodesize = sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        nodesize = sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        nodesize = sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        nodesize = sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        nodesize = sizeof(UA_ViewNode);
        break;
    default:
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* Allocate with enough trailing memory for the concrete node subtype. */
    struct nodeEntry *newEntry;
    if(!(newEntry = UA_malloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
        return UA_STATUSCODE_BADOUTOFMEMORY;
    UA_memcpy((void*)&newEntry->node, node, nodesize);
    cds_lfht_node_init(&newEntry->htn);
    newEntry->refcount = ALIVE_BIT;
    if(inserted) // increase the counter before adding the node
        newEntry->refcount++;
    hash_t h = hash(&node->nodeId);
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_lookup(ns->ht, h, compare, &node->nodeId, &iter);
    /* No node found that can be replaced */
    if(!iter.node) {
        rcu_read_unlock();
        UA_free(newEntry);
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    }
    struct nodeEntry *oldEntry = (struct nodeEntry*) ((uintptr_t)iter.node - offsetof(struct nodeEntry, htn));
    /* The node we found is obsolete*/
    if(&oldEntry->node != oldNode) {
        rcu_read_unlock();
        UA_free(newEntry);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* The old node is replaced by a managed node. */
    if(cds_lfht_replace(ns->ht, &iter, h, compare, &node->nodeId, &newEntry->htn) != 0) {
        /* Replacing failed. Maybe the node got replaced just before this thread tried to.*/
        rcu_read_unlock();
        UA_free(newEntry);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* If an entry got replaced, mark it as dead. Reclamation is deferred to
       after the grace period via call_rcu so current readers stay safe. */
    call_rcu(&oldEntry->rcu_head, markDead);
    rcu_read_unlock();
    /* Ownership transferred: the caller's node struct is no longer needed. */
    UA_free(node);
    if(inserted)
        *inserted = &newEntry->node;
    return UA_STATUSCODE_GOOD;
}
  254. UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
  255. hash_t nhash = hash(nodeid);
  256. struct cds_lfht_iter iter;
  257. rcu_read_lock();
  258. /* If this fails, then the node has already been removed. */
  259. cds_lfht_lookup(ns->ht, nhash, compare, &nodeid, &iter);
  260. if(!iter.node || cds_lfht_del(ns->ht, iter.node) != 0) {
  261. rcu_read_unlock();
  262. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  263. }
  264. struct nodeEntry *entry = (struct nodeEntry*) ((uintptr_t)iter.node - offsetof(struct nodeEntry, htn));
  265. call_rcu(&entry->rcu_head, markDead);
  266. rcu_read_unlock();
  267. return UA_STATUSCODE_GOOD;
  268. }
  269. const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
  270. hash_t nhash = hash(nodeid);
  271. struct cds_lfht_iter iter;
  272. rcu_read_lock();
  273. cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
  274. struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
  275. if(!found_entry) {
  276. rcu_read_unlock();
  277. return UA_NULL;
  278. }
  279. /* This is done within a read-lock. The node will not be marked dead within a read-lock. */
  280. uatomic_inc(&found_entry->refcount);
  281. rcu_read_unlock();
  282. return &found_entry->node;
  283. }
/* Call `visitor` once for every node in the store. Each node is pinned with a
 * refcount increment before the read lock is dropped, so the visitor may run
 * outside the RCU critical section (and may itself use the nodestore). */
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != UA_NULL) {
        struct nodeEntry *found_entry = (struct nodeEntry *)cds_lfht_iter_get_node(&iter);
        /* Pin the entry so it survives the visitor call below. */
        uatomic_inc(&found_entry->refcount);
        const UA_Node *node = &found_entry->node;
        rcu_read_unlock();
        visitor(node);
        UA_NodeStore_release((const UA_Node *)node);
        rcu_read_lock();
        /* NOTE(review): the iterator is resumed after the read lock was
           released and reacquired; this presumably relies on the pinned
           entry keeping iter.node valid across the gap -- verify against
           the cds_lfht iterator guarantees. */
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
}