/* ua_nodestore.c */

#include "ua_nodestore.h"
#include "ua_util.h"
#include "ua_statuscodes.h"

/* It can happen that we want to delete a node even though a function higher
   in the call-chain still holds a reference. So we count references and
   delete a node only once the count falls to zero. For this, every node is
   copied to a new location where it sits right behind the refcount integer.
   Since we copy nodes onto the heap, the allocation for the nodeEntry is made
   big enough to accommodate the different node classes (the nodeEntry may
   have an over-length "tail"). */
#define ALIVE_BIT (1 << 15) /* Alive bit in the refcount */

struct nodeEntry {
    UA_UInt16 refcount;
    const UA_Node node;
};

struct UA_NodeStore {
    struct nodeEntry **entries;
    UA_UInt32 size;
    UA_UInt32 count;
    UA_UInt32 sizePrimeIndex;
};

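/* Layout sketch (illustrative; mirrors what __nodeEntryFromNode does below):
   for a variable node, the entry is allocated with an over-length tail so
   that the class-specific members fit right behind the refcount:

       struct nodeEntry *e = UA_alloc(sizeof(struct nodeEntry)
                                      - sizeof(UA_Node)
                                      + sizeof(UA_VariableNode));
*/
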
/* The size of the hash-map is always a prime number. The primes are chosen
   to be close to the next power of 2, so the size roughly doubles from one
   prime to the next. */
typedef UA_UInt32 hash_t;
static hash_t const primes[] = {
    7,         13,        31,        61,         127,        251,
    509,       1021,      2039,      4093,       8191,       16381,
    32749,     65521,     131071,    262139,     524287,     1048573,
    2097143,   4194301,   8388593,   16777213,   33554393,   67108859,
    134217689, 268435399, 536870909, 1073741789, 2147483647, 4294967291
};

static INLINE hash_t mod(hash_t h, hash_t size) { return h % size; }
static INLINE hash_t mod2(hash_t h, hash_t size) { return 1 + (h % (size - 2)); }

static INLINE UA_Int16 higher_prime_index(hash_t n) {
    UA_UInt16 low  = 0;
    UA_UInt16 high = sizeof(primes) / sizeof(hash_t);
    while(low != high) {
        UA_UInt16 mid = low + (high - low) / 2;
        if(n > primes[mid])
            low = mid + 1;
        else
            high = mid;
    }
    return low;
}
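
/* Worked example (illustrative): higher_prime_index(40) binary-searches the
   table and returns 3, since primes[3] == 61 is the smallest prime >= 40.
   The pair mod/mod2 implements double hashing: mod selects the initial
   bucket, mod2 yields a probe step in [1, size-2]. Because the table size is
   prime, every such step is coprime to it, so the probe sequence visits all
   buckets. */
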
/* Based on Murmur-Hash 3 by Austin Appleby (public domain, freely usable) */
static hash_t hash_array(const UA_Byte *data, UA_UInt32 len, UA_UInt32 seed) {
    if(data == UA_NULL)
        return 0;

    const int32_t nblocks = len / 4;
    const uint32_t *blocks;
    static const uint32_t c1 = 0xcc9e2d51;
    static const uint32_t c2 = 0x1b873593;
    static const uint32_t r1 = 15;
    static const uint32_t r2 = 13;
    static const uint32_t m  = 5;
    static const uint32_t n  = 0xe6546b64;
    hash_t hash = seed;
    blocks = (const uint32_t *)data;
    for(int32_t i = 0; i < nblocks; i++) {
        uint32_t k = blocks[i];
        k *= c1;
        k = (k << r1) | (k >> (32 - r1));
        k *= c2;
        hash ^= k;
        hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
    }

    /* Mix in the remaining 1-3 bytes; the cases fall through intentionally */
    const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
    uint32_t k1 = 0;
    switch(len & 3) {
    case 3:
        k1 ^= tail[2] << 16;
        /* fall through */
    case 2:
        k1 ^= tail[1] << 8;
        /* fall through */
    case 1:
        k1 ^= tail[0];
        k1 *= c1;
        k1 = (k1 << r1) | (k1 >> (32 - r1));
        k1 *= c2;
        hash ^= k1;
    }

    /* Finalization mix: force all bits to avalanche */
    hash ^= len;
    hash ^= (hash >> 16);
    hash *= 0x85ebca6b;
    hash ^= (hash >> 13);
    hash *= 0xc2b2ae35;
    hash ^= (hash >> 16);
    return hash;
}

static hash_t hash(const UA_NodeId *n) {
    switch(n->identifierType) {
    case UA_NODEIDTYPE_NUMERIC:
        /* Knuth's multiplicative hashing; mod(2^32) is implicit */
        return (n->identifier.numeric + n->namespaceIndex) * 2654435761;
    case UA_NODEIDTYPE_STRING:
        return hash_array(n->identifier.string.data, n->identifier.string.length, n->namespaceIndex);
    case UA_NODEIDTYPE_GUID:
        return hash_array((UA_Byte *)&(n->identifier.guid), sizeof(UA_Guid), n->namespaceIndex);
    case UA_NODEIDTYPE_BYTESTRING:
        return hash_array((UA_Byte *)n->identifier.byteString.data, n->identifier.byteString.length, n->namespaceIndex);
    default:
        UA_assert(UA_FALSE);
        return 0;
    }
}
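
/* Background note (informal): 2654435761 is Knuth's multiplicative hash
   constant, a prime close to 2^32 divided by the golden ratio, which spreads
   consecutive numeric ids evenly over the hash range. For strings, guids and
   bytestrings, the namespaceIndex serves as the Murmur seed, so identical
   byte sequences in different namespaces still hash differently. */
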
/* Returns UA_TRUE if an entry was found under the nodeid. Otherwise, returns
   UA_FALSE and points entry at the next free slot. */
static UA_Boolean __containsNodeId(const UA_NodeStore *ns, const UA_NodeId *nodeid, struct nodeEntry ***entry) {
    hash_t h = hash(nodeid);
    UA_UInt32 size = ns->size;
    hash_t index = mod(h, size);
    struct nodeEntry **e = &ns->entries[index];
    if(*e == UA_NULL) {
        *entry = e;
        return UA_FALSE;
    }
    if(UA_NodeId_equal(&(*e)->node.nodeId, nodeid)) {
        *entry = e;
        return UA_TRUE;
    }

    /* Open addressing with double hashing: advance by a second hash until a
       free slot or the matching entry is found */
    hash_t hash2 = mod2(h, size);
    for(;;) {
        index += hash2;
        if(index >= size)
            index -= size;
        e = &ns->entries[index];
        if(*e == UA_NULL) {
            *entry = e;
            return UA_FALSE;
        }
        if(UA_NodeId_equal(&(*e)->node.nodeId, nodeid)) {
            *entry = e;
            return UA_TRUE;
        }
    }

    /* NOTREACHED */
    return UA_TRUE;
}
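
/* Usage sketch (illustrative; someNodeId and newEntry are hypothetical
   names): the same call serves both lookup and insertion.

       struct nodeEntry **slot;
       if(__containsNodeId(ns, &someNodeId, &slot)) {
           // *slot points to the existing entry
       } else {
           // *slot is the free slot where an entry for someNodeId belongs
           *slot = newEntry;
       }
*/
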
/* Resizes the array of entries and reinserts all table elements. The
   occupancy of the table after the call will be about 50%. */
static UA_StatusCode __expand(UA_NodeStore *ns) {
    UA_Int32 osize = ns->size;
    UA_Int32 count = ns->count;
    /* Leave the size unchanged if the table is at most half full but more
       than an eighth full, or if it already has the minimal size */
    if(count * 2 < osize && (count * 8 > osize || osize <= 32))
        return UA_STATUSCODE_GOOD;

    UA_UInt32 nindex = higher_prime_index(count * 2);
    UA_Int32 nsize = primes[nindex];
    struct nodeEntry **nentries;
    if(!(nentries = UA_alloc(sizeof(struct nodeEntry *) * nsize)))
        return UA_STATUSCODE_BADOUTOFMEMORY;
    memset(nentries, 0, nsize * sizeof(struct nodeEntry *));

    struct nodeEntry **oentries = ns->entries;
    ns->entries = nentries;
    ns->size = nsize;
    ns->sizePrimeIndex = nindex;

    /* Recompute the position of every entry and insert the pointer */
    for(UA_Int32 i = 0, j = 0; i < osize && j < count; i++) {
        if(!oentries[i])
            continue;
        struct nodeEntry **e;
        __containsNodeId(ns, &(*oentries[i]).node.nodeId, &e); /* We know this returns an empty entry here */
        *e = oentries[i];
        j++;
    }

    UA_free(oentries);
    return UA_STATUSCODE_GOOD;
}
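
/* Worked example (illustrative): with osize == 31 and count == 20 the table
   is more than half full (20 * 2 >= 31), so we resize. The new size is the
   smallest prime >= 2 * count == 40, i.e. primes[3] == 61, which leaves the
   table at roughly one-third occupancy right after the resize. */
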
/* Frees the entry if no references are left. The callers clear the ALIVE_BIT
   in the refcount beforehand, so a refcount of zero means the entry is both
   dead and no longer referenced. */
static void __deleteEntry(struct nodeEntry *entry) {
    if(entry->refcount > 0)
        return;
    const UA_Node *node = &entry->node;
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        UA_ObjectNode_deleteMembers((UA_ObjectNode *)node);
        break;
    case UA_NODECLASS_VARIABLE:
        UA_VariableNode_deleteMembers((UA_VariableNode *)node);
        break;
    case UA_NODECLASS_METHOD:
        UA_MethodNode_deleteMembers((UA_MethodNode *)node);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)node);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)node);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)node);
        break;
    case UA_NODECLASS_DATATYPE:
        UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)node);
        break;
    case UA_NODECLASS_VIEW:
        UA_ViewNode_deleteMembers((UA_ViewNode *)node);
        break;
    default:
        UA_assert(UA_FALSE);
        break;
    }
    UA_free(entry);
}
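
/* Refcount states (illustrative summary of the scheme above): a freshly
   inserted, unreferenced node has refcount == ALIVE_BIT. Each
   UA_NodeStore_get adds 1, each UA_NodeStore_release subtracts 1. Removal
   clears the ALIVE_BIT, so the entry is freed as soon as the last reader
   releases it and the refcount reaches zero. */
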
static INLINE struct nodeEntry * __nodeEntryFromNode(const UA_Node *node) {
    UA_UInt32 nodesize = 0;
    /* Determine the size of the node's concrete class. The node is copied
       into the entry below and the original is freed; it shall no longer be
       used. */
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        nodesize = sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        nodesize = sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        nodesize = sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        nodesize = sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        nodesize = sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        nodesize = sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        nodesize = sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        nodesize = sizeof(UA_ViewNode);
        break;
    default:
        UA_assert(UA_FALSE);
    }

    /* Allocate the entry with an over-length tail that fits the node class */
    struct nodeEntry *entry;
    if(!(entry = UA_alloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
        return UA_NULL;
    memcpy((void *)&entry->node, node, nodesize);
    UA_free((void *)node);
    return entry;
}

/**********************/
/* Exported functions */
/**********************/

UA_NodeStore * UA_NodeStore_new() {
    UA_NodeStore *ns;
    if(!(ns = UA_alloc(sizeof(UA_NodeStore))))
        return UA_NULL;
    ns->sizePrimeIndex = higher_prime_index(32);
    ns->size = primes[ns->sizePrimeIndex];
    ns->count = 0;
    if(!(ns->entries = UA_alloc(sizeof(struct nodeEntry *) * ns->size))) {
        UA_free(ns);
        return UA_NULL;
    }
    memset(ns->entries, 0, ns->size * sizeof(struct nodeEntry *));
    return ns;
}

void UA_NodeStore_delete(UA_NodeStore *ns) {
    UA_UInt32 size = ns->size;
    struct nodeEntry **entries = ns->entries;
    for(UA_UInt32 i = 0; i < size; i++) {
        if(entries[i] != UA_NULL) {
            entries[i]->refcount &= ~ALIVE_BIT; /* mark dead */
            __deleteEntry(entries[i]);
            entries[i] = UA_NULL;
            ns->count--;
        }
    }
    UA_free(ns->entries);
    UA_free(ns);
}

UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, const UA_Node **node, UA_Boolean getManaged) {
    if(ns->size * 3 <= ns->count * 4) {
        /* The table is at least 75% full; grow it before inserting */
        if(__expand(ns) != UA_STATUSCODE_GOOD)
            return UA_STATUSCODE_BADINTERNALERROR;
    }

    /* Get a free slot */
    struct nodeEntry **slot;
    UA_NodeId *nodeId = (UA_NodeId *)&(*node)->nodeId;
    if(UA_NodeId_isNull(nodeId)) {
        /* Find a unique nodeid that is not yet taken */
        nodeId->identifierType = UA_NODEIDTYPE_NUMERIC;
        nodeId->namespaceIndex = 1; /* namespace 1 is always in the local nodestore */
        UA_Int32 identifier = ns->count + 1; /* start value */
        UA_Int32 size = ns->size;
        hash_t increase = mod2(identifier, size);
        while(UA_TRUE) {
            nodeId->identifier.numeric = identifier;
            if(!__containsNodeId(ns, nodeId, &slot))
                break;
            identifier += increase;
            if(identifier >= size)
                identifier -= size;
        }
    } else {
        if(__containsNodeId(ns, nodeId, &slot))
            return UA_STATUSCODE_BADNODEIDEXISTS;
    }

    struct nodeEntry *entry = __nodeEntryFromNode(*node);
    if(!entry)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    *slot = entry;
    ns->count++;

    if(getManaged) {
        /* Hand out a managed reference to the (copied) node */
        entry->refcount = ALIVE_BIT + 1;
        *node = &entry->node;
    } else {
        entry->refcount = ALIVE_BIT;
        *node = UA_NULL;
    }
    return UA_STATUSCODE_GOOD;
}
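
/* Usage sketch (illustrative; error handling elided, myCreateVariableNode is
   a hypothetical helper): insert a node, keep working with the managed copy,
   then release it.

       UA_NodeStore *ns = UA_NodeStore_new();
       UA_VariableNode *vn = myCreateVariableNode();
       const UA_Node *managed = (const UA_Node *)vn;
       UA_NodeStore_insert(ns, &managed, UA_TRUE); // vn is consumed here
       // ... read from managed ...
       UA_NodeStore_release(managed);
       UA_NodeStore_delete(ns);
*/
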
UA_StatusCode UA_NodeStore_replace(UA_NodeStore *ns, const UA_Node **node, UA_Boolean getManaged) {
    struct nodeEntry **slot;
    const UA_NodeId *nodeId = &(*node)->nodeId;
    if(!__containsNodeId(ns, nodeId, &slot))
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    struct nodeEntry *entry = __nodeEntryFromNode(*node);
    if(!entry)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    (*slot)->refcount &= ~ALIVE_BIT; /* mark dead */
    __deleteEntry(*slot);
    *slot = entry;
    if(getManaged) {
        entry->refcount = ALIVE_BIT + 1;
        *node = &entry->node;
    } else {
        entry->refcount = ALIVE_BIT;
        *node = UA_NULL;
    }
    return UA_STATUSCODE_GOOD;
}
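
/* Note on the replace semantics (informal): the old entry is freed only once
   the last reader that obtained it via UA_NodeStore_get has called
   UA_NodeStore_release. Until then both copies exist, but new lookups already
   see the replacement. */
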
const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
    struct nodeEntry **slot;
    if(!__containsNodeId(ns, nodeid, &slot))
        return UA_NULL;
    /* The caller holds a reference and must eventually call UA_NodeStore_release */
    (*slot)->refcount++;
    return &(*slot)->node;
}

UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
    struct nodeEntry **slot;
    if(!__containsNodeId(ns, nodeid, &slot))
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    /* One should check beforehand whether deleting this node makes the
       UA_NodeStore inconsistent */
    (*slot)->refcount &= ~ALIVE_BIT; /* mark dead */
    __deleteEntry(*slot);
    *slot = UA_NULL;
    ns->count--;

    /* Downsize the hashmap if it is very empty */
    if(ns->count * 8 < ns->size && ns->size > 32)
        __expand(ns); /* this can fail; we just continue with the bigger hashmap */
    return UA_STATUSCODE_GOOD;
}

void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    for(UA_UInt32 i = 0; i < ns->size; i++) {
        if(ns->entries[i] != UA_NULL)
            visitor(&ns->entries[i]->node);
    }
}

void UA_NodeStore_release(const UA_Node *managed) {
    /* Recover the enclosing entry from the embedded node (container-of idiom) */
    struct nodeEntry *entry = (struct nodeEntry *)((char *)managed - offsetof(struct nodeEntry, node));
    entry->refcount--;
    __deleteEntry(entry);
}