/* ua_nodestore_concurrent.c — concurrent nodestore built on userspace-rcu's
   lock-free resizable hashtable (rculfhash). */
  1. #include "ua_nodestore.h"
  2. #include "ua_util.h"
  3. #include <urcu.h>
  4. #include <urcu/compiler.h> // for caa_container_of
  5. #include <urcu/uatomic.h>
  6. #include <urcu/rculfhash.h>
#define ALIVE_BIT (1 << 15) /* Alive bit in the readcount */

/* One hashtable entry. The UA_Node member is placed last so that a single
   allocation can hold any of the larger concrete node types (see the size
   switch in UA_NodeStore_insert). */
typedef struct UA_NodeStore_Entry {
    struct cds_lfht_node htn;       /* contains next-ptr for urcu-hashmap */
    struct rcu_head rcu_head;       /* For call-rcu */
    UA_UInt16 readcount;            /* Counts the amount of readers on it [alive-bit, 15 counter-bits] */
    UA_Node node;                   /* Might be cast from any _bigger_ UA_Node* type. Allocate enough memory! */
} UA_NodeStore_Entry;

/* The nodestore is only a handle around the urcu hashtable. */
struct UA_NodeStore {
    struct cds_lfht *ht; /* Hash table */
};

/********/
/* Hash */
/********/

typedef UA_UInt32 hash_t;
  21. /* Based on Murmur-Hash 3 by Austin Appleby (public domain, freely usable) */
  22. static INLINE hash_t hash_array(const UA_Byte *data, UA_UInt32 len, UA_UInt32 seed) {
  23. static const uint32_t c1 = 0xcc9e2d51;
  24. static const uint32_t c2 = 0x1b873593;
  25. static const uint32_t r1 = 15;
  26. static const uint32_t r2 = 13;
  27. static const uint32_t m = 5;
  28. static const uint32_t n = 0xe6546b64;
  29. hash_t hash = seed;
  30. if(data == UA_NULL) return 0;
  31. const int32_t nblocks = len / 4;
  32. const uint32_t *blocks = (const uint32_t *)data;
  33. for(int32_t i = 0;i < nblocks;i++) {
  34. uint32_t k = blocks[i];
  35. k *= c1;
  36. k = (k << r1) | (k >> (32 - r1));
  37. k *= c2;
  38. hash ^= k;
  39. hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
  40. }
  41. const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
  42. uint32_t k1 = 0;
  43. switch(len & 3) {
  44. case 3:
  45. k1 ^= tail[2] << 16;
  46. case 2:
  47. k1 ^= tail[1] << 8;
  48. case 1:
  49. k1 ^= tail[0];
  50. k1 *= c1;
  51. k1 = (k1 << r1) | (k1 >> (32 - r1));
  52. k1 *= c2;
  53. hash ^= k1;
  54. }
  55. hash ^= len;
  56. hash ^= (hash >> 16);
  57. hash *= 0x85ebca6b;
  58. hash ^= (hash >> 13);
  59. hash *= 0xc2b2ae35;
  60. hash ^= (hash >> 16);
  61. return hash;
  62. }
  63. static INLINE hash_t hash(const UA_NodeId *n) {
  64. switch(n->identifierType) {
  65. case UA_NODEIDTYPE_NUMERIC:
  66. /* Knuth's multiplicative hashing */
  67. return (n->identifier.numeric + n->namespaceIndex) * 2654435761; // mod(2^32) is implicit
  68. case UA_NODEIDTYPE_STRING:
  69. return hash_array(n->identifier.string.data, n->identifier.string.length, n->namespaceIndex);
  70. case UA_NODEIDTYPE_GUID:
  71. return hash_array((UA_Byte *)&(n->identifier.guid), sizeof(UA_Guid), n->namespaceIndex);
  72. case UA_NODEIDTYPE_BYTESTRING:
  73. return hash_array((UA_Byte *)n->identifier.byteString.data, n->identifier.byteString.length, n->namespaceIndex);
  74. default:
  75. UA_assert(UA_FALSE);
  76. return 0;
  77. }
  78. }
  79. /****************/
  80. /* UA_NodeStore */
  81. /****************/
  82. static inline void node_deleteMembers(const UA_Node *node) {
  83. switch(node->nodeClass) {
  84. case UA_NODECLASS_OBJECT:
  85. UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
  86. break;
  87. case UA_NODECLASS_VARIABLE:
  88. UA_VariableNode_deleteMembers((UA_VariableNode *)node);
  89. break;
  90. case UA_NODECLASS_METHOD:
  91. UA_MethodNode_deleteMembers((UA_MethodNode *)node);
  92. break;
  93. case UA_NODECLASS_OBJECTTYPE:
  94. UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
  95. break;
  96. case UA_NODECLASS_VARIABLETYPE:
  97. UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
  98. break;
  99. case UA_NODECLASS_REFERENCETYPE:
  100. UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
  101. break;
  102. case UA_NODECLASS_DATATYPE:
  103. UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
  104. break;
  105. case UA_NODECLASS_VIEW:
  106. UA_ViewNode_deleteMembers((UA_ViewNode *)node);
  107. break;
  108. default:
  109. UA_assert(UA_FALSE);
  110. break;
  111. }
  112. }
  113. /* We are in a rcu_read lock. So the node will not be freed under our feet. */
  114. static int compare(struct cds_lfht_node *htn, const void *orig) {
  115. UA_NodeId *origid = (UA_NodeId *)orig;
  116. UA_NodeId *newid = &((UA_NodeStore_Entry *)htn)->node.nodeId; /* The htn is first in the entry structure. */
  117. return UA_NodeId_equal(newid, origid) == UA_EQUAL;
  118. }
  119. /* The entry was removed from the hashtable. No more readers can get it. Since
  120. all readers using the node for a longer time (outside the rcu critical
  121. section) increased the readcount, we only need to wait for the readcount
  122. to reach zero. */
  123. static void markDead(struct rcu_head *head) {
  124. UA_NodeStore_Entry *entry = caa_container_of(head, UA_NodeStore_Entry, rcu_head);
  125. if(uatomic_sub_return(&entry->readcount, ALIVE_BIT) > 0)
  126. return;
  127. node_deleteMembers(&entry->node);
  128. UA_free(entry);
  129. return;
  130. }
  131. /* Free the entry if it is dead and nobody uses it anymore */
  132. void UA_NodeStore_releaseManagedNode(const UA_Node *managed) {
  133. if(managed == UA_NULL)
  134. return;
  135. UA_NodeStore_Entry *entry = caa_container_of(managed, UA_NodeStore_Entry, node); // pointer to the first entry
  136. if(uatomic_sub_return(&entry->readcount, 1) > 0)
  137. return;
  138. node_deleteMembers(managed);
  139. UA_free(entry);
  140. return;
  141. }
  142. UA_StatusCode UA_NodeStore_new(UA_NodeStore **result) {
  143. UA_NodeStore *ns;
  144. if(!(ns = UA_alloc(sizeof(UA_NodeStore))))
  145. return UA_STATUSCODE_BADOUTOFMEMORY;
  146. /* 32 is the minimum size for the hashtable. */
  147. ns->ht = cds_lfht_new(32, 32, 0, CDS_LFHT_AUTO_RESIZE, NULL);
  148. if(!ns->ht) {
  149. UA_free(ns);
  150. return UA_STATUSCODE_BADOUTOFMEMORY;
  151. }
  152. *result = ns;
  153. return UA_STATUSCODE_GOOD;
  154. }
/* Tear down the nodestore: unlink every entry and queue it for deferred
   reclamation, then destroy the (logically empty) hashtable itself. */
void UA_NodeStore_delete(UA_NodeStore *ns) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    struct cds_lfht_node *found_htn;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != UA_NULL) {
        found_htn = cds_lfht_iter_get_node(&iter);
        /* cds_lfht_del returns 0 only for the caller that wins the removal,
           so each entry is queued for markDead exactly once. */
        if(!cds_lfht_del(ht, found_htn)) {
            UA_NodeStore_Entry *entry = caa_container_of(found_htn, UA_NodeStore_Entry, htn);
            /* Freed by markDead once no RCU readers remain. */
            call_rcu(&entry->rcu_head, markDead);
        }
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
    /* NOTE(review): cds_lfht_destroy must not run inside a read-side critical
       section and may block until pending call_rcu callbacks complete —
       hence it is called only after the unlock above. */
    cds_lfht_destroy(ht, UA_NULL);
    UA_free(ns);
}
  173. UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, UA_Node **node, UA_Byte flags) {
  174. UA_UInt32 nodesize;
  175. /* Copy the node into the entry. Then reset the original node. It shall no longer be used. */
  176. switch((*node)->nodeClass) {
  177. case UA_NODECLASS_OBJECT:
  178. nodesize = sizeof(UA_ObjectNode);
  179. break;
  180. case UA_NODECLASS_VARIABLE:
  181. nodesize = sizeof(UA_VariableNode);
  182. break;
  183. case UA_NODECLASS_METHOD:
  184. nodesize = sizeof(UA_MethodNode);
  185. break;
  186. case UA_NODECLASS_OBJECTTYPE:
  187. nodesize = sizeof(UA_ObjectTypeNode);
  188. break;
  189. case UA_NODECLASS_VARIABLETYPE:
  190. nodesize = sizeof(UA_VariableTypeNode);
  191. break;
  192. case UA_NODECLASS_REFERENCETYPE:
  193. nodesize = sizeof(UA_ReferenceTypeNode);
  194. break;
  195. case UA_NODECLASS_DATATYPE:
  196. nodesize = sizeof(UA_DataTypeNode);
  197. break;
  198. case UA_NODECLASS_VIEW:
  199. nodesize = sizeof(UA_ViewNode);
  200. break;
  201. default:
  202. return UA_STATUSCODE_BADINTERNALERROR;
  203. }
  204. UA_NodeStore_Entry *entry;
  205. if(!(entry = UA_alloc(sizeof(UA_NodeStore_Entry) - sizeof(UA_Node) + nodesize)))
  206. return UA_STATUSCODE_BADOUTOFMEMORY;
  207. memcpy(&entry->node, *node, nodesize);
  208. cds_lfht_node_init(&entry->htn);
  209. entry->readcount = ALIVE_BIT;
  210. if(flags & UA_NODESTORE_INSERT_GETMANAGED)
  211. entry->readcount++;
  212. hash_t nhash = hash(&(*node)->nodeId);
  213. struct cds_lfht_node *result;
  214. if(flags & UA_NODESTORE_INSERT_UNIQUE) {
  215. rcu_read_lock();
  216. result = cds_lfht_add_unique(ns->ht, nhash, compare, &entry->node.nodeId, &entry->htn);
  217. rcu_read_unlock();
  218. /* If the nodeid exists already */
  219. if(result != &entry->htn) {
  220. UA_free(entry);
  221. return UA_STATUSCODE_BADNODEIDEXISTS;
  222. }
  223. } else {
  224. rcu_read_lock();
  225. result = cds_lfht_add_replace(ns->ht, nhash, compare, &(*node)->nodeId, &entry->htn);
  226. /* If an entry got replaced, mark it as dead. */
  227. if(result) {
  228. UA_NodeStore_Entry *entry = caa_container_of(result, UA_NodeStore_Entry, htn);
  229. call_rcu(&entry->rcu_head, markDead); /* Queue this for the next time when no readers are on the entry.*/
  230. }
  231. rcu_read_unlock();
  232. }
  233. UA_free((UA_Node *)*node); /* The old node is replaced by a managed node. */
  234. if(flags & UA_NODESTORE_INSERT_GETMANAGED)
  235. *node = &entry->node;
  236. else
  237. *node = UA_NULL;
  238. return UA_STATUSCODE_GOOD;
  239. }
  240. UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
  241. hash_t nhash = hash(nodeid);
  242. struct cds_lfht_iter iter;
  243. rcu_read_lock();
  244. cds_lfht_lookup(ns->ht, nhash, compare, &nodeid, &iter);
  245. struct cds_lfht_node *found_htn = cds_lfht_iter_get_node(&iter);
  246. /* If this fails, then the node has already been removed. */
  247. if(!found_htn || cds_lfht_del(ns->ht, found_htn) != 0) {
  248. rcu_read_unlock();
  249. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  250. }
  251. UA_NodeStore_Entry *entry = caa_container_of(found_htn, UA_NodeStore_Entry, htn);
  252. call_rcu(&entry->rcu_head, markDead);
  253. rcu_read_unlock();
  254. return UA_STATUSCODE_GOOD;
  255. }
  256. UA_StatusCode UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid, const UA_Node **managedNode) {
  257. hash_t nhash = hash(nodeid);
  258. struct cds_lfht_iter iter;
  259. rcu_read_lock();
  260. cds_lfht_lookup(ns->ht, nhash, compare, nodeid, &iter);
  261. UA_NodeStore_Entry *found_entry = (UA_NodeStore_Entry *)cds_lfht_iter_get_node(&iter);
  262. if(!found_entry) {
  263. rcu_read_unlock();
  264. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  265. }
  266. /* This is done within a read-lock. The node will not be marked dead within a read-lock. */
  267. uatomic_inc(&found_entry->readcount);
  268. rcu_read_unlock();
  269. *managedNode = &found_entry->node;
  270. return UA_STATUSCODE_GOOD;
  271. }
/* Call the visitor on every node in the store. Each node's readcount is
   increased so it stays valid while the visitor runs outside of the RCU
   critical section. */
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    struct cds_lfht *ht = ns->ht;
    struct cds_lfht_iter iter;
    rcu_read_lock();
    cds_lfht_first(ht, &iter);
    while(iter.node != UA_NULL) {
        UA_NodeStore_Entry *found_entry = (UA_NodeStore_Entry *)cds_lfht_iter_get_node(&iter);
        /* Pin the entry so it survives outside the read-side section. */
        uatomic_inc(&found_entry->readcount);
        const UA_Node *node = &found_entry->node;
        /* Drop the read lock so the visitor may block or take locks. */
        rcu_read_unlock();
        visitor(node);
        UA_NodeStore_releaseManagedNode((UA_Node *)node);
        rcu_read_lock();
        /* NOTE(review): the iterator is advanced from a position captured
           before the lock was dropped; if the current entry was removed
           concurrently in the meantime, resuming from this stale iterator
           looks unsafe — verify against the urcu rculfhash documentation. */
        cds_lfht_next(ht, &iter);
    }
    rcu_read_unlock();
}