ua_nodestore.c

#include "ua_nodestore.h"
#include "ua_util.h"
#include "ua_statuscodes.h"

/* It can happen that we want to delete a node even though a function higher up
   in the call chain still holds a reference. So we count references and delete
   the node only once the count falls to zero. To make this possible, every node
   is copied into a nodeEntry, where it sits right behind the refcount integer.

   Since nodes are copied onto the heap, the allocation for the nodeEntry is
   made large enough to accommodate the different node classes (the nodeEntry
   may have an overlength "tail"). */

#define ALIVE_BIT (1 << 15) /* Alive bit in the refcount */

struct nodeEntry {
    UA_UInt16 refcount;
    UA_Node node; /* could be const, but then we could not free it without compiler warnings */
};
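
/* For illustration only (not compiled into this file): a minimal, self-contained
   sketch of the layout described above, using toy types instead of UA_Node. It
   shows (a) overallocating the entry so the "tail" can hold a larger concrete
   node type, and (b) recovering the entry from a pointer to the embedded node
   via offsetof, which is what UA_NodeStore_release does at the end of this
   file. All toy_* names are made up for the example. */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toyNode    { int nodeClass; };                          /* stands in for UA_Node */
struct toyBigNode { struct toyNode base; char extra[64]; };    /* a larger node class */
struct toyEntry   { uint16_t refcount; struct toyNode node; }; /* overlength tail follows 'node' */

int main(void) {
    /* Overallocate so the tail after 'node' can hold a toyBigNode */
    struct toyEntry *entry = malloc(sizeof(struct toyEntry) - sizeof(struct toyNode) + sizeof(struct toyBigNode));
    if(!entry)
        return 1;
    entry->refcount = 1;
    struct toyBigNode big = { {42}, "payload" };
    memcpy(&entry->node, &big, sizeof(big));

    /* Given only the inner node pointer, recover the surrounding entry */
    struct toyNode *managed = &entry->node;
    struct toyEntry *recovered = (struct toyEntry *)((uintptr_t)managed - offsetof(struct toyEntry, node));
    printf("refcount = %u, nodeClass = %d\n", (unsigned)recovered->refcount, recovered->node.nodeClass);
    free(entry);
    return 0;
}
#endif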
struct UA_NodeStore {
    struct nodeEntry **entries; /* open-addressing hash table of entry pointers */
    UA_UInt32 size;             /* current table size (a prime from the table below) */
    UA_UInt32 count;            /* number of occupied slots */
    UA_UInt32 sizePrimeIndex;   /* index of the current size in the primes array */
};
#include "ua_nodestore_hash.inc"

/* The size of the hash map is always a prime number. The primes are chosen to
   be close to the next power of 2, so the size roughly doubles with each
   prime. */
static hash_t const primes[] = {
    7,         13,        31,        61,         127,        251,
    509,       1021,      2039,      4093,       8191,       16381,
    32749,     65521,     131071,    262139,     524287,     1048573,
    2097143,   4194301,   8388593,   16777213,   33554393,   67108859,
    134217689, 268435399, 536870909, 1073741789, 2147483647, 4294967291
};
/* Returns the index of the smallest prime in the table that is >= n (binary search) */
static UA_Int16 higher_prime_index(hash_t n) {
    UA_UInt16 low  = 0;
    UA_UInt16 high = sizeof(primes) / sizeof(hash_t);
    while(low != high) {
        UA_UInt16 mid = low + (high - low) / 2;
        if(n > primes[mid])
            low = mid + 1;
        else
            high = mid;
    }
    return low;
}
/* Returns UA_TRUE if an entry was found under the nodeid and sets entry to the
   occupied slot. Otherwise, returns UA_FALSE and sets entry to a pointer to
   the next free slot. */
static UA_Boolean containsNodeId(const UA_NodeStore *ns, const UA_NodeId *nodeid, struct nodeEntry ***entry) {
    hash_t h = hash(nodeid);
    UA_UInt32 size = ns->size;
    hash_t index = mod(h, size);
    struct nodeEntry **e = &ns->entries[index];

    if(*e == UA_NULL) {
        *entry = e;
        return UA_FALSE;
    }

    if(UA_NodeId_equal(&(*e)->node.nodeId, nodeid)) {
        *entry = e;
        return UA_TRUE;
    }

    /* Collision: probe further slots with a second hash (double hashing) */
    hash_t hash2 = mod2(h, size);
    for(;;) {
        index += hash2;
        if(index >= size)
            index -= size;
        e = &ns->entries[index];
        if(*e == UA_NULL) {
            *entry = e;
            return UA_FALSE;
        }
        if(UA_NodeId_equal(&(*e)->node.nodeId, nodeid)) {
            *entry = e;
            return UA_TRUE;
        }
    }

    /* NOTREACHED */
    return UA_TRUE;
}
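
/* For illustration only (not compiled into this file): a minimal, self-contained
   sketch of the double-hashing probe used above, with toy integer keys and a
   deliberately trivial hash so that the keys collide. The toy_* helpers stand in
   for hash()/mod()/mod2() from ua_nodestore_hash.inc; only the probe rule
   "index += hash2, wrap around the table" is taken from containsNodeId. */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOYSIZE 7 /* a prime, like the table sizes above */

static uint32_t toy_hash(uint32_t key) { return key; }                   /* trivial so the keys below collide */
static uint32_t toy_mod(uint32_t h)    { return h % TOYSIZE; }           /* first slot */
static uint32_t toy_mod2(uint32_t h)   { return 1 + (h % (TOYSIZE - 2)); } /* step size, never 0 */

int main(void) {
    uint32_t table[TOYSIZE] = {0}; /* 0 means empty */
    uint32_t keys[] = {12, 19, 26}; /* all hash to slot 5 in a size-7 table */
    for(size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
        uint32_t h     = toy_hash(keys[i]);
        uint32_t index = toy_mod(h);
        uint32_t step  = toy_mod2(h);
        while(table[index] != 0 && table[index] != keys[i]) {
            index += step;       /* same probe rule as containsNodeId */
            if(index >= TOYSIZE)
                index -= TOYSIZE;
        }
        table[index] = keys[i];
        printf("key %u stored in slot %u\n", (unsigned)keys[i], (unsigned)index);
    }
    return 0;
}
#endif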
/* The following function changes the size of the memory allocated for the
   entries and reinserts all table elements. The occupancy of the table after
   the call will be about 50%. */
static UA_StatusCode expand(UA_NodeStore *ns) {
    UA_Int32 osize = ns->size;
    UA_Int32 count = ns->count;

    /* Resize only if the table is either too full (>= 50% occupied) or too
       empty (< 1/8 occupied and above the minimum size). */
    if(count * 2 < osize && (count * 8 > osize || osize <= 32))
        return UA_STATUSCODE_GOOD;

    UA_UInt32 nindex = higher_prime_index(count * 2);
    UA_Int32 nsize = primes[nindex];
    struct nodeEntry **nentries;
    if(!(nentries = UA_malloc(sizeof(struct nodeEntry *) * nsize)))
        return UA_STATUSCODE_BADOUTOFMEMORY;
    UA_memset(nentries, 0, nsize * sizeof(struct nodeEntry *));

    struct nodeEntry **oentries = ns->entries;
    ns->entries = nentries;
    ns->size    = nsize;
    ns->sizePrimeIndex = nindex;

    /* Recompute the position of every entry and insert the pointer */
    for(UA_Int32 i = 0, j = 0; i < osize && j < count; i++) {
        if(!oentries[i])
            continue;
        struct nodeEntry **e;
        containsNodeId(ns, &(*oentries[i]).node.nodeId, &e); /* We know this returns an empty slot here */
        *e = oentries[i];
        j++;
    }

    UA_free(oentries);
    return UA_STATUSCODE_GOOD;
}
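
/* For illustration only (not compiled into this file): a small, self-contained
   sketch of the resizing policy around expand(). The thresholds mirror the
   logic in this file (grow when an insert finds the table >= 75% full, shrink
   when a removal leaves it below 1/8, target size = smallest prime >= 2*count);
   toy_primes and toy_next_size are made-up stand-ins for primes[] and
   higher_prime_index(). */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t toy_primes[] = { 7, 13, 31, 61, 127, 251, 509, 1021 };

/* Smallest prime in the table that is >= n (same idea as higher_prime_index) */
static uint32_t toy_next_size(uint32_t n) {
    for(size_t i = 0; i < sizeof(toy_primes) / sizeof(toy_primes[0]); i++)
        if(toy_primes[i] >= n)
            return toy_primes[i];
    return toy_primes[sizeof(toy_primes) / sizeof(toy_primes[0]) - 1];
}

int main(void) {
    uint32_t size = 31;
    for(uint32_t count = 0; count <= 60; count++) {
        if(size * 3 <= count * 4) {                    /* insert-side trigger: >= 75% full */
            uint32_t nsize = toy_next_size(count * 2); /* aim for ~50% occupancy afterwards */
            printf("count=%u: grow %u -> %u\n", (unsigned)count, (unsigned)size, (unsigned)nsize);
            size = nsize;
        }
    }
    return 0;
}
#endif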
/* Deletes the entry once no references are held anymore. The caller is
   responsible for clearing the ALIVE_BIT beforehand. */
static void deleteEntry(struct nodeEntry *entry) {
    if(entry->refcount > 0)
        return;
    switch(entry->node.nodeClass) {
    case UA_NODECLASS_OBJECT:
        UA_ObjectNode_deleteMembers((UA_ObjectNode *)&entry->node);
        break;
    case UA_NODECLASS_VARIABLE:
        UA_VariableNode_deleteMembers((UA_VariableNode *)&entry->node);
        break;
    case UA_NODECLASS_METHOD:
        UA_MethodNode_deleteMembers((UA_MethodNode *)&entry->node);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        UA_ObjectTypeNode_deleteMembers((UA_ObjectTypeNode *)&entry->node);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        UA_VariableTypeNode_deleteMembers((UA_VariableTypeNode *)&entry->node);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        UA_ReferenceTypeNode_deleteMembers((UA_ReferenceTypeNode *)&entry->node);
        break;
    case UA_NODECLASS_DATATYPE:
        UA_DataTypeNode_deleteMembers((UA_DataTypeNode *)&entry->node);
        break;
    case UA_NODECLASS_VIEW:
        UA_ViewNode_deleteMembers((UA_ViewNode *)&entry->node);
        break;
    default:
        UA_assert(UA_FALSE);
        break;
    }
    UA_free(entry);
}
/** Copies the node into a freshly allocated entry. Then frees the original node
    (but not its content, which now lives inside the entry). */
static struct nodeEntry * nodeEntryFromNode(UA_Node *node) {
    size_t nodesize = 0;
    switch(node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        nodesize = sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        nodesize = sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        nodesize = sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        nodesize = sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        nodesize = sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        nodesize = sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        nodesize = sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        nodesize = sizeof(UA_ViewNode);
        break;
    default:
        UA_assert(UA_FALSE);
        return UA_NULL; /* unknown node class; don't build an entry of size zero */
    }
    struct nodeEntry *newEntry;
    if(!(newEntry = UA_malloc(sizeof(struct nodeEntry) - sizeof(UA_Node) + nodesize)))
        return UA_NULL;
    UA_memcpy(&newEntry->node, node, nodesize);
    UA_free(node);
    return newEntry;
}
/**********************/
/* Exported functions */
/**********************/

UA_NodeStore * UA_NodeStore_new(void) {
    UA_NodeStore *ns;
    if(!(ns = UA_malloc(sizeof(UA_NodeStore))))
        return UA_NULL;
    ns->sizePrimeIndex = higher_prime_index(32);
    ns->size  = primes[ns->sizePrimeIndex];
    ns->count = 0;
    if(!(ns->entries = UA_malloc(sizeof(struct nodeEntry *) * ns->size))) {
        UA_free(ns);
        return UA_NULL;
    }
    UA_memset(ns->entries, 0, ns->size * sizeof(struct nodeEntry *));
    return ns;
}
void UA_NodeStore_delete(UA_NodeStore *ns) {
    UA_UInt32 size = ns->size;
    struct nodeEntry **entries = ns->entries;
    for(UA_UInt32 i = 0; i < size; i++) {
        if(entries[i] != UA_NULL) {
            entries[i]->refcount &= ~ALIVE_BIT; /* mark dead */
            deleteEntry(entries[i]);
            entries[i] = UA_NULL;
            ns->count--;
        }
    }
    UA_free(ns->entries);
    UA_free(ns);
}
UA_StatusCode UA_NodeStore_insert(UA_NodeStore *ns, UA_Node *node, const UA_Node **inserted) {
    if(ns->size * 3 <= ns->count * 4) {
        /* The table is at least 75% full; expand before inserting */
        if(expand(ns) != UA_STATUSCODE_GOOD)
            return UA_STATUSCODE_BADINTERNALERROR;
    }

    /* Get a free slot */
    struct nodeEntry **slot;

    /* FIXME: a somewhat dirty workaround to preserve the namespace index; the
       namespace index is assumed to be valid. */
    UA_NodeId tempNodeid;
    UA_NodeId_copy(&node->nodeId, &tempNodeid);
    tempNodeid.namespaceIndex = 0;
    if(UA_NodeId_isNull(&tempNodeid)) {
        /* Find a unique nodeid that is not yet taken */
        node->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
        if(node->nodeId.namespaceIndex == 0) /* an original request for ns=0 yields ns=1 */
            node->nodeId.namespaceIndex = 1;
        UA_Int32 identifier = ns->count + 1; /* start value */
        UA_Int32 size = ns->size;
        hash_t increase = mod2(identifier, size);
        while(UA_TRUE) {
            node->nodeId.identifier.numeric = identifier;
            if(!containsNodeId(ns, &node->nodeId, &slot))
                break;
            identifier += increase;
            if(identifier >= size)
                identifier -= size;
        }
    } else {
        UA_NodeId_deleteMembers(&tempNodeid);
        if(containsNodeId(ns, &node->nodeId, &slot))
            return UA_STATUSCODE_BADNODEIDEXISTS;
    }

    struct nodeEntry *entry = nodeEntryFromNode(node);
    if(!entry)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    *slot = entry;
    ns->count++;

    if(inserted) {
        /* Hand back a reference to the caller */
        entry->refcount = ALIVE_BIT + 1;
        *inserted = &entry->node;
    } else {
        entry->refcount = ALIVE_BIT;
    }
    return UA_STATUSCODE_GOOD;
}
UA_StatusCode UA_NodeStore_replace(UA_NodeStore *ns, const UA_Node *oldNode, UA_Node *node,
                                   const UA_Node **inserted) {
    struct nodeEntry **slot;
    const UA_NodeId *nodeId = &node->nodeId;
    if(!containsNodeId(ns, nodeId, &slot))
        return UA_STATUSCODE_BADNODEIDUNKNOWN;

    /* The caller is trying to replace an obsolete node. Without threading this
       cannot happen unless the user does it deliberately in his code. */
    if(&(*slot)->node != oldNode)
        return UA_STATUSCODE_BADINTERNALERROR;

    struct nodeEntry *entry = nodeEntryFromNode(node);
    if(!entry)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    (*slot)->refcount &= ~ALIVE_BIT; /* mark the old entry dead; it is freed when its last reference is released */
    *slot = entry;

    if(inserted) {
        entry->refcount = ALIVE_BIT + 1;
        *inserted = &entry->node;
    } else {
        entry->refcount = ALIVE_BIT;
    }
    return UA_STATUSCODE_GOOD;
}
const UA_Node * UA_NodeStore_get(const UA_NodeStore *ns, const UA_NodeId *nodeid) {
    struct nodeEntry **slot;
    if(!containsNodeId(ns, nodeid, &slot))
        return UA_NULL;
    (*slot)->refcount++;
    return &(*slot)->node;
}
UA_StatusCode UA_NodeStore_remove(UA_NodeStore *ns, const UA_NodeId *nodeid) {
    struct nodeEntry **slot;
    if(!containsNodeId(ns, nodeid, &slot))
        return UA_STATUSCODE_BADNODEIDUNKNOWN;

    /* Check beforehand whether deleting the node makes the UA_NodeStore inconsistent. */
    (*slot)->refcount &= ~ALIVE_BIT; /* mark dead */
    deleteEntry(*slot);
    *slot = UA_NULL;
    ns->count--;

    /* Downsize the hashmap if it is very empty */
    if(ns->count * 8 < ns->size && ns->size > 32)
        expand(ns); /* this can fail; we just continue with the bigger hashmap */
    return UA_STATUSCODE_GOOD;
}
void UA_NodeStore_iterate(const UA_NodeStore *ns, UA_NodeStore_nodeVisitor visitor) {
    for(UA_UInt32 i = 0; i < ns->size; i++) {
        if(ns->entries[i] != UA_NULL)
            visitor(&ns->entries[i]->node);
    }
}
void UA_NodeStore_release(const UA_Node *managed) {
    /* We know what we are doing here, and the cast removes a compiler warning:
       nobody else holds a reference to the const pointer, so we may free it. */
    struct nodeEntry *entry = (struct nodeEntry *)((uintptr_t)managed - offsetof(struct nodeEntry, node));
    entry->refcount--;
    deleteEntry(entry);
}
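
/* For illustration only (not compiled into this file): a sketch of the typical
   life cycle of a node in the store, using the exported functions above. The
   node constructor UA_ObjectNode_new() is an assumption (such constructors are
   provided elsewhere in the library); everything else follows from this file:
   insert hands ownership of the node to the store, get increments the refcount,
   release decrements it, and the entry is freed once it is marked dead
   (removed) and the last reference is released. */
#if 0
#include "ua_nodestore.h"

static void example(void) {
    UA_NodeStore *ns = UA_NodeStore_new();
    if(!ns)
        return;

    UA_ObjectNode *n = UA_ObjectNode_new(); /* assumed constructor */
    n->nodeId.namespaceIndex = 1;
    n->nodeId.identifierType = UA_NODEIDTYPE_NUMERIC;
    n->nodeId.identifier.numeric = 42;

    const UA_Node *inserted = UA_NULL;
    if(UA_NodeStore_insert(ns, (UA_Node *)n, &inserted) != UA_STATUSCODE_GOOD) {
        UA_NodeStore_delete(ns);
        return;
    }
    /* 'inserted' already carries a reference (refcount = ALIVE_BIT + 1) */

    const UA_Node *found = UA_NodeStore_get(ns, &inserted->nodeId); /* refcount++ */
    if(found)
        UA_NodeStore_release(found);                                /* refcount-- */

    UA_NodeStore_remove(ns, &inserted->nodeId); /* marks the entry dead; it survives while we hold a reference */
    UA_NodeStore_release(inserted);             /* last reference gone, the entry is freed */

    UA_NodeStore_delete(ns);
}
#endif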