ua_nodestore_hashmap.c

/* This work is licensed under a Creative Commons CCZero 1.0 Universal License.
 * See http://creativecommons.org/publicdomain/zero/1.0/ for more information.
 *
 * Copyright 2014-2019 (c) Fraunhofer IOSB (Author: Julius Pfrommer)
 * Copyright 2017 (c) Julian Grothoff
 * Copyright 2017 (c) Stefan Profanter, fortiss GmbH
 */

#include <open62541/plugin/nodestore_default.h>

#if UA_MULTITHREADING >= 100
#define BEGIN_CRITSECT(NODEMAP) UA_LOCK(NODEMAP->lock)
#define END_CRITSECT(NODEMAP) UA_UNLOCK(NODEMAP->lock)
#else
#define BEGIN_CRITSECT(NODEMAP) do {} while(0)
#define END_CRITSECT(NODEMAP) do {} while(0)
#endif
/* container_of */
#define container_of(ptr, type, member) \
    (type *)((uintptr_t)ptr - offsetof(type,member))

/* The default Nodestore is simply a hash-map from NodeIds to Nodes. To find an
 * entry, iterate over candidate positions according to the NodeId hash.
 *
 * - Tombstone or non-matching NodeId: continue searching
 * - Matching NodeId: Return the entry
 * - NULL: Abort the search
 *
 * The nodestore uses atomic operations to set entries of the hash-map. If
 * UA_ENABLE_IMMUTABLE_NODES is configured, the nodestore allows read-access
 * from an interrupt without seeing corrupted nodes. For true multi-threaded
 * access, a mutex is used.
 *
 * Multi-threading without a mutex could be realized with the Linux RCU
 * mechanism. But this is not done for this implementation of the nodestore. */
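
/* Slot encoding used throughout (see UA_NODEMAP_TOMBSTONE below): a slot value
 * of NULL marks a never-used slot, the sentinel pointer 0x01 marks a tombstone
 * (a removed entry), and any larger pointer is an occupied entry. This is why
 * the lookups below compare against UA_NODEMAP_TOMBSTONE with `>` (occupied)
 * and `<=` (free or tombstone). */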

typedef struct UA_NodeMapEntry {
    struct UA_NodeMapEntry *orig; /* the version this is a copy from (or NULL) */
    UA_UInt16 refCount; /* How many consumers have a reference to the node? */
    UA_Boolean deleted; /* Node was marked as deleted and can be deleted when refCount == 0 */
    UA_Node node;
} UA_NodeMapEntry;

#define UA_NODEMAP_MINSIZE 64
#define UA_NODEMAP_TOMBSTONE ((UA_NodeMapEntry*)0x01)

typedef struct {
    UA_NodeMapEntry **entries;
    UA_UInt32 size;
    UA_UInt32 count;
    UA_UInt32 sizePrimeIndex;
#if UA_MULTITHREADING >= 100
    UA_LOCK_TYPE(lock) /* Protect access */
#endif
} UA_NodeMap;
/*********************/
/* HashMap Utilities */
/*********************/

/* The size of the hash-map is always a prime number. They are chosen to be
 * close to the next power of 2, so the size approximately doubles with each
 * prime. */
static UA_UInt32 const primes[] = {
    7, 13, 31, 61, 127, 251,
    509, 1021, 2039, 4093, 8191, 16381,
    32749, 65521, 131071, 262139, 524287, 1048573,
    2097143, 4194301, 8388593, 16777213, 33554393, 67108859,
    134217689, 268435399, 536870909, 1073741789, 2147483647, 4294967291
};

static UA_UInt32 mod(UA_UInt32 h, UA_UInt32 size) { return h % size; }
static UA_UInt32 mod2(UA_UInt32 h, UA_UInt32 size) { return 1 + (h % (size - 2)); }
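
/* Worked example of the double hashing (illustrative values only): for a table
 * of size 61 and a NodeId hash h = 200, the probe starts at slot
 * mod(200, 61) = 17 and advances by mod2(200, 61) = 1 + (200 % 59) = 24 on
 * every step, wrapping around the table: 17, 41, 4, 28, ... Because the size
 * is prime and the step lies between 1 and size-2, the probe sequence visits
 * every slot before returning to the start index. */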

static UA_UInt16
higher_prime_index(UA_UInt32 n) {
    UA_UInt16 low = 0;
    UA_UInt16 high = (UA_UInt16)(sizeof(primes) / sizeof(UA_UInt32));
    while(low != high) {
        UA_UInt16 mid = (UA_UInt16)(low + ((high - low) / 2));
        if(n > primes[mid])
            low = (UA_UInt16)(mid + 1);
        else
            high = mid;
    }
    return low;
}

/* Returns an empty slot, or NULL if the NodeId already exists in the map or if
 * no empty slot is found. */
static UA_NodeMapEntry **
findFreeSlot(const UA_NodeMap *ns, const UA_NodeId *nodeid) {
    UA_NodeMapEntry **retval = NULL;
    UA_UInt32 h = UA_NodeId_hash(nodeid);
    UA_UInt32 size = ns->size;
    UA_UInt64 idx = mod(h, size); /* Use 64bit container to avoid overflow */
    UA_UInt32 startIdx = (UA_UInt32)idx;
    UA_UInt32 hash2 = mod2(h, size);
    UA_NodeMapEntry *entry = NULL;
    do {
        entry = ns->entries[(UA_UInt32)idx];
        if(entry > UA_NODEMAP_TOMBSTONE &&
           UA_NodeId_equal(&entry->node.nodeId, nodeid))
            return NULL;
        if(!retval && entry <= UA_NODEMAP_TOMBSTONE)
            retval = &ns->entries[(UA_UInt32)idx];
        idx += hash2;
        if(idx >= size)
            idx -= size;
    } while((UA_UInt32)idx != startIdx && entry);

    /* NULL is returned if there is no free slot (idx == startIdx). Otherwise
     * the first free slot is returned once we are sure that the NodeId cannot
     * be found in the hashmap (!entry). */
    return retval;
}

/* The occupancy of the table after the call will be about 50% */
static UA_StatusCode
expand(UA_NodeMap *ns) {
    UA_UInt32 osize = ns->size;
    UA_UInt32 count = ns->count;
    /* Resize only when the table (after removal of unused elements) is either
     * too full or too empty */
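    /* For example (illustrative numbers, UA_NODEMAP_MINSIZE is 64 above): at
     * osize = 127 the table is left untouched while 16 <= count <= 63; it is
     * rehashed into a larger table once count reaches 64 (count * 2 >= osize)
     * and into a smaller one once count drops to 15 (count * 8 <= osize). */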
    if(count * 2 < osize && (count * 8 > osize || osize <= UA_NODEMAP_MINSIZE))
        return UA_STATUSCODE_GOOD;

    UA_NodeMapEntry **oentries = ns->entries;
    UA_UInt32 nindex = higher_prime_index(count * 2);
    UA_UInt32 nsize = primes[nindex];
    UA_NodeMapEntry **nentries = (UA_NodeMapEntry **)UA_calloc(nsize, sizeof(UA_NodeMapEntry*));
    if(!nentries)
        return UA_STATUSCODE_BADOUTOFMEMORY;

    ns->entries = nentries;
    ns->size = nsize;
    ns->sizePrimeIndex = nindex;

    /* recompute the position of every entry and insert the pointer */
    for(size_t i = 0, j = 0; i < osize && j < count; ++i) {
        if(oentries[i] <= UA_NODEMAP_TOMBSTONE)
            continue;
        UA_NodeMapEntry **e = findFreeSlot(ns, &oentries[i]->node.nodeId);
        UA_assert(e);
        *e = oentries[i];
        ++j;
    }

    UA_free(oentries);
    return UA_STATUSCODE_GOOD;
}

static UA_NodeMapEntry *
newEntry(UA_NodeClass nodeClass) {
    size_t size = sizeof(UA_NodeMapEntry) - sizeof(UA_Node);
    switch(nodeClass) {
    case UA_NODECLASS_OBJECT:
        size += sizeof(UA_ObjectNode);
        break;
    case UA_NODECLASS_VARIABLE:
        size += sizeof(UA_VariableNode);
        break;
    case UA_NODECLASS_METHOD:
        size += sizeof(UA_MethodNode);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        size += sizeof(UA_ObjectTypeNode);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        size += sizeof(UA_VariableTypeNode);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        size += sizeof(UA_ReferenceTypeNode);
        break;
    case UA_NODECLASS_DATATYPE:
        size += sizeof(UA_DataTypeNode);
        break;
    case UA_NODECLASS_VIEW:
        size += sizeof(UA_ViewNode);
        break;
    default:
        return NULL;
    }
    UA_NodeMapEntry *entry = (UA_NodeMapEntry*)UA_calloc(1, size);
    if(!entry)
        return NULL;
    entry->node.nodeClass = nodeClass;
    return entry;
}

static void
deleteEntry(UA_NodeMapEntry *entry) {
    UA_Node_clear(&entry->node);
    UA_free(entry);
}

static void
cleanupEntry(UA_NodeMapEntry *entry) {
    if(entry->deleted && entry->refCount == 0)
        deleteEntry(entry);
}

static UA_StatusCode
clearSlot(UA_NodeMap *ns, UA_NodeMapEntry **slot) {
    UA_NodeMapEntry *entry = *slot;
    if(UA_atomic_cmpxchg((void**)slot, entry, UA_NODEMAP_TOMBSTONE) != entry)
        return UA_STATUSCODE_BADINTERNALERROR;
    entry->deleted = true;
    cleanupEntry(entry);
    --ns->count;
    /* Downsize the hashmap if it is very empty */
    if(ns->count * 8 < ns->size && ns->size > 32)
        expand(ns); /* Can fail. Just continue with the bigger hashmap. */
    return UA_STATUSCODE_GOOD;
}

static UA_NodeMapEntry **
findOccupiedSlot(const UA_NodeMap *ns, const UA_NodeId *nodeid) {
    UA_UInt32 h = UA_NodeId_hash(nodeid);
    UA_UInt32 size = ns->size;
    UA_UInt64 idx = mod(h, size); /* Use 64bit container to avoid overflow */
    UA_UInt32 hash2 = mod2(h, size);
    UA_UInt32 startIdx = (UA_UInt32)idx;
    UA_NodeMapEntry *entry = NULL;
    do {
        entry = ns->entries[(UA_UInt32)idx];
        if(entry > UA_NODEMAP_TOMBSTONE &&
           UA_NodeId_equal(&entry->node.nodeId, nodeid))
            return &ns->entries[(UA_UInt32)idx];
        idx += hash2;
        if(idx >= size)
            idx -= size;
    } while((UA_UInt32)idx != startIdx && entry);

    /* NULL is returned if the NodeId was not found: either all slots were
     * probed (idx == startIdx) or the end of the used slots (!entry) was
     * reached. */
    return NULL;
}

/***********************/
/* Interface functions */
/***********************/

static UA_Node *
UA_NodeMap_newNode(void *context, UA_NodeClass nodeClass) {
    UA_NodeMapEntry *entry = newEntry(nodeClass);
    if(!entry)
        return NULL;
    return &entry->node;
}

static void
UA_NodeMap_deleteNode(void *context, UA_Node *node) {
#if UA_MULTITHREADING >= 100
    UA_NodeMap *ns = (UA_NodeMap*)context;
#endif
    BEGIN_CRITSECT(ns);
    UA_NodeMapEntry *entry = container_of(node, UA_NodeMapEntry, node);
    UA_assert(&entry->node == node);
    deleteEntry(entry);
    END_CRITSECT(ns);
}

static const UA_Node *
UA_NodeMap_getNode(void *context, const UA_NodeId *nodeid) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    UA_NodeMapEntry **entry = findOccupiedSlot(ns, nodeid);
    if(!entry) {
        END_CRITSECT(ns);
        return NULL;
    }
    ++(*entry)->refCount;
    END_CRITSECT(ns);
    return (const UA_Node*)&(*entry)->node;
}

static void
UA_NodeMap_releaseNode(void *context, const UA_Node *node) {
    if(!node)
        return;
#if UA_MULTITHREADING >= 100
    UA_NodeMap *ns = (UA_NodeMap*)context;
#endif
    BEGIN_CRITSECT(ns);
    UA_NodeMapEntry *entry = container_of(node, UA_NodeMapEntry, node);
    UA_assert(&entry->node == node);
    UA_assert(entry->refCount > 0);
    --entry->refCount;
    cleanupEntry(entry);
    END_CRITSECT(ns);
}

static UA_StatusCode
UA_NodeMap_getNodeCopy(void *context, const UA_NodeId *nodeid,
                       UA_Node **outNode) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    UA_NodeMapEntry **slot = findOccupiedSlot(ns, nodeid);
    if(!slot) {
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    }
    UA_NodeMapEntry *entry = *slot;
    UA_NodeMapEntry *newItem = newEntry(entry->node.nodeClass);
    if(!newItem) {
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADOUTOFMEMORY;
    }
    UA_StatusCode retval = UA_Node_copy(&entry->node, &newItem->node);
    if(retval == UA_STATUSCODE_GOOD) {
        newItem->orig = entry; /* Store the pointer to the original */
        *outNode = &newItem->node;
    } else {
        deleteEntry(newItem);
    }
    END_CRITSECT(ns);
    return retval;
}

static UA_StatusCode
UA_NodeMap_removeNode(void *context, const UA_NodeId *nodeid) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    UA_NodeMapEntry **slot = findOccupiedSlot(ns, nodeid);
    UA_StatusCode retval = UA_STATUSCODE_GOOD;
    if(slot)
        retval = clearSlot(ns, slot);
    else
        retval = UA_STATUSCODE_BADNODEIDUNKNOWN;
    END_CRITSECT(ns);
    return retval;
}

static UA_StatusCode
UA_NodeMap_insertNode(void *context, UA_Node *node,
                      UA_NodeId *addedNodeId) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    if(ns->size * 3 <= ns->count * 4) {
        if(expand(ns) != UA_STATUSCODE_GOOD) {
            END_CRITSECT(ns);
            return UA_STATUSCODE_BADINTERNALERROR;
        }
    }

    UA_NodeMapEntry **slot;
    if(node->nodeId.identifierType == UA_NODEIDTYPE_NUMERIC &&
       node->nodeId.identifier.numeric == 0) {
        /* Create a random nodeid: Start at least with 50,000 to make sure we
         * do not conflict with nodes from the spec. If we find a conflict, we
         * just try another identifier until we have tried all possible
         * identifiers. Since the size is prime and we don't change the
         * increase val, we will reach the starting id again. E.g. adding a
         * nodeset will create children while there are still other nodes
         * which need to be created. Thus the node ids may collide. */
        UA_UInt32 size = ns->size;
        UA_UInt64 identifier = mod(50000 + size+1, UA_UINT32_MAX); /* Use 64bit to
                                                                     * avoid overflow */
        UA_UInt32 increase = mod2(ns->count+1, size);
        UA_UInt32 startId = (UA_UInt32)identifier; /* mod ensures that the id
                                                    * is a valid 32 bit integer */
        do {
            node->nodeId.identifier.numeric = (UA_UInt32)identifier;
            slot = findFreeSlot(ns, &node->nodeId);
            if(slot)
                break;
            identifier += increase;
            if(identifier >= size)
                identifier -= size;
        } while((UA_UInt32)identifier != startId);
    } else {
        slot = findFreeSlot(ns, &node->nodeId);
    }

    if(!slot) {
        deleteEntry(container_of(node, UA_NodeMapEntry, node));
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDEXISTS;
    }

    /* Copy the NodeId */
    UA_StatusCode retval = UA_STATUSCODE_GOOD;
    if(addedNodeId) {
        retval = UA_NodeId_copy(&node->nodeId, addedNodeId);
        if(retval != UA_STATUSCODE_GOOD) {
            deleteEntry(container_of(node, UA_NodeMapEntry, node));
            END_CRITSECT(ns);
            return retval;
        }
    }

    /* Insert the node */
    UA_NodeMapEntry *oldEntryContainer = *slot;
    UA_NodeMapEntry *newEntryContainer = container_of(node, UA_NodeMapEntry, node);
    if(oldEntryContainer > UA_NODEMAP_TOMBSTONE ||
       UA_atomic_cmpxchg((void**)slot, oldEntryContainer,
                         newEntryContainer) != oldEntryContainer) {
        deleteEntry(container_of(node, UA_NodeMapEntry, node));
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDEXISTS;
    }
    ++ns->count;
    END_CRITSECT(ns);
    return retval;
}

static UA_StatusCode
UA_NodeMap_replaceNode(void *context, UA_Node *node) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    UA_NodeMapEntry *newEntryContainer = container_of(node, UA_NodeMapEntry, node);
    BEGIN_CRITSECT(ns);

    /* Find the node */
    UA_NodeMapEntry **slot = findOccupiedSlot(ns, &node->nodeId);
    if(!slot) {
        deleteEntry(newEntryContainer);
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    }
    UA_NodeMapEntry *oldEntryContainer = *slot;

    /* The node was already updated since the copy was made? */
    if(oldEntryContainer != newEntryContainer->orig) {
        deleteEntry(newEntryContainer);
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADINTERNALERROR;
    }

    /* Replace the entry with an atomic operation */
    if(UA_atomic_cmpxchg((void**)slot, oldEntryContainer,
                         newEntryContainer) != oldEntryContainer) {
        deleteEntry(newEntryContainer);
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    oldEntryContainer->deleted = true;
    cleanupEntry(oldEntryContainer);
    END_CRITSECT(ns);
    return UA_STATUSCODE_GOOD;
}

static void
UA_NodeMap_iterate(void *context, UA_NodestoreVisitor visitor,
                   void *visitorContext) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    for(UA_UInt32 i = 0; i < ns->size; ++i) {
        if(ns->entries[i] > UA_NODEMAP_TOMBSTONE) {
            END_CRITSECT(ns);
            UA_NodeMapEntry *entry = ns->entries[i];
            entry->refCount++;
            visitor(visitorContext, &entry->node);
            entry->refCount--;
            cleanupEntry(entry);
            BEGIN_CRITSECT(ns);
        }
    }
    END_CRITSECT(ns);
}

static void
UA_NodeMap_delete(void *context) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
#if UA_MULTITHREADING >= 100
    UA_LOCK_DESTROY(ns->lock);
#endif
    UA_UInt32 size = ns->size;
    UA_NodeMapEntry **entries = ns->entries;
    for(UA_UInt32 i = 0; i < size; ++i) {
        if(entries[i] > UA_NODEMAP_TOMBSTONE) {
            /* On debug builds, check that all nodes were released */
            UA_assert(entries[i]->refCount == 0);
            /* Delete the node */
            deleteEntry(entries[i]);
        }
    }
    UA_free(ns->entries);
    UA_free(ns);
}

UA_StatusCode
UA_Nodestore_HashMap(UA_Nodestore *ns) {
    /* Allocate and initialize the nodemap */
    UA_NodeMap *nodemap = (UA_NodeMap*)UA_malloc(sizeof(UA_NodeMap));
    if(!nodemap)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    nodemap->sizePrimeIndex = higher_prime_index(UA_NODEMAP_MINSIZE);
    nodemap->size = primes[nodemap->sizePrimeIndex];
    nodemap->count = 0;
    nodemap->entries = (UA_NodeMapEntry**)
        UA_calloc(nodemap->size, sizeof(UA_NodeMapEntry*));
    if(!nodemap->entries) {
        UA_free(nodemap);
        return UA_STATUSCODE_BADOUTOFMEMORY;
    }
#if UA_MULTITHREADING >= 100
    UA_LOCK_INIT(nodemap->lock)
#endif

    /* Populate the nodestore */
    ns->context = nodemap;
    ns->clear = UA_NodeMap_delete;
    ns->newNode = UA_NodeMap_newNode;
    ns->deleteNode = UA_NodeMap_deleteNode;
    ns->getNode = UA_NodeMap_getNode;
    ns->releaseNode = UA_NodeMap_releaseNode;
    ns->getNodeCopy = UA_NodeMap_getNodeCopy;
    ns->insertNode = UA_NodeMap_insertNode;
    ns->replaceNode = UA_NodeMap_replaceNode;
    ns->removeNode = UA_NodeMap_removeNode;
    ns->iterate = UA_NodeMap_iterate;
    return UA_STATUSCODE_GOOD;
}
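
/* Minimal usage sketch (illustrative only; it exercises the function pointers
 * populated above and assumes a caller-owned UA_Nodestore plus a caller-chosen
 * NodeId `requestedId`, both hypothetical here):
 *
 *   UA_Nodestore store;
 *   if(UA_Nodestore_HashMap(&store) != UA_STATUSCODE_GOOD)
 *       return;
 *
 *   // Allocate a node, give it a NodeId and hand ownership to the store
 *   UA_Node *node = store.newNode(store.context, UA_NODECLASS_OBJECT);
 *   if(node) {
 *       UA_NodeId_copy(&requestedId, &node->nodeId);
 *       store.insertNode(store.context, node, NULL);
 *   }
 *
 *   // Look up the node; every successful getNode needs a matching releaseNode
 *   const UA_Node *found = store.getNode(store.context, &requestedId);
 *   if(found)
 *       store.releaseNode(store.context, found);
 *
 *   // Tear down the store and all remaining nodes
 *   store.clear(store.context);
 */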