ua_nodestore_default.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520
  1. /* This work is licensed under a Creative Commons CCZero 1.0 Universal License.
  2. * See http://creativecommons.org/publicdomain/zero/1.0/ for more information.
  3. *
  4. * Copyright 2014-2017 (c) Fraunhofer IOSB (Author: Julius Pfrommer)
  5. * Copyright 2017 (c) Julian Grothoff
  6. * Copyright 2017 (c) Stefan Profanter, fortiss GmbH
  7. */
  8. #include "ua_nodestore_default.h"
#include "../src/ua_util_internal.h" /* TODO: Move atomic operations to arch definitions */
  10. /* container_of */
  11. #define container_of(ptr, type, member) \
  12. (type *)((uintptr_t)ptr - offsetof(type,member))
  13. #ifdef UA_ENABLE_MULTITHREADING
  14. #include <pthread.h>
  15. #define BEGIN_CRITSECT(NODEMAP) pthread_mutex_lock(&(NODEMAP)->mutex)
  16. #define END_CRITSECT(NODEMAP) pthread_mutex_unlock(&(NODEMAP)->mutex)
  17. #else
  18. #define BEGIN_CRITSECT(NODEMAP)
  19. #define END_CRITSECT(NODEMAP)
  20. #endif
  21. /* The default Nodestore is simply a hash-map from NodeIds to Nodes. To find an
  22. * entry, iterate over candidate positions according to the NodeId hash.
  23. *
  24. * - Tombstone or non-matching NodeId: continue searching
  25. * - Matching NodeId: Return the entry
  26. * - NULL: Abort the search
  27. *
  28. * The nodestore uses atomic operations to set entries of the hash-map. If
  29. * UA_ENABLE_IMMUTABLE_NODES is configured, the nodestore allows read-access
  30. * from an interrupt without seeing corrupted nodes. For true multi-threaded
  31. * access, a mutex is used.
  32. *
  33. * Multi-threading without a mutex could be realized with the Linux RCU mechanism.
  34. * But this is not done for this implementation of the nodestore. */
/* A map entry wraps a node so that bookkeeping (reference counting and
 * deferred deletion) travels with it. The UA_Node member must stay last:
 * newEntry over-allocates the struct so that the concrete node type
 * (UA_ObjectNode, UA_VariableNode, ...) fits in place. */
typedef struct UA_NodeMapEntry {
    struct UA_NodeMapEntry *orig; /* the version this is a copy from (or NULL) */
    UA_UInt16 refCount; /* How many consumers have a reference to the node? */
    UA_Boolean deleted; /* Node was marked as deleted and can be deleted when refCount == 0 */
    UA_Node node; /* Must remain the last member (variable-size tail) */
} UA_NodeMapEntry;
  41. #define UA_NODEMAP_MINSIZE 64
  42. #define UA_NODEMAP_TOMBSTONE ((UA_NodeMapEntry*)0x01)
/* The nodestore context: an open-addressing hash-map from NodeIds to
 * UA_NodeMapEntry pointers. A slot holds NULL (empty, ends a probe chain),
 * the tombstone marker (removed, probe chains continue across it), or a
 * live entry. */
typedef struct {
    UA_NodeMapEntry **entries;
    UA_UInt32 size;           /* Number of slots; always a prime from the table below */
    UA_UInt32 count;          /* Number of live entries */
    UA_UInt32 sizePrimeIndex; /* Index of `size` in the primes[] table */
#ifdef UA_ENABLE_MULTITHREADING
    pthread_mutex_t mutex; /* Protect access */
#endif
} UA_NodeMap;
  52. /*********************/
  53. /* HashMap Utilities */
  54. /*********************/
/* The size of the hash-map is always a prime number. They are chosen to be
 * close to the next power of 2. So the size ca. doubles with each prime.
 * A prime size also guarantees that the double-hashing step from mod2() is
 * coprime to the table size, so a probe sequence visits every slot. */
static UA_UInt32 const primes[] = {
    7, 13, 31, 61, 127, 251,
    509, 1021, 2039, 4093, 8191, 16381,
    32749, 65521, 131071, 262139, 524287, 1048573,
    2097143, 4194301, 8388593, 16777213, 33554393, 67108859,
    134217689, 268435399, 536870909, 1073741789, 2147483647, 4294967291
};
  64. static UA_UInt32 mod(UA_UInt32 h, UA_UInt32 size) { return h % size; }
  65. static UA_UInt32 mod2(UA_UInt32 h, UA_UInt32 size) { return 1 + (h % (size - 2)); }
  66. static UA_UInt16
  67. higher_prime_index(UA_UInt32 n) {
  68. UA_UInt16 low = 0;
  69. UA_UInt16 high = (UA_UInt16)(sizeof(primes) / sizeof(UA_UInt32));
  70. while(low != high) {
  71. UA_UInt16 mid = (UA_UInt16)(low + ((high - low) / 2));
  72. if(n > primes[mid])
  73. low = (UA_UInt16)(mid + 1);
  74. else
  75. high = mid;
  76. }
  77. return low;
  78. }
/* Returns an empty slot for the NodeId, or NULL if the NodeId already exists
 * or no empty slot is found. Probes with double hashing: initial position
 * from mod(), step width from mod2(). */
static UA_NodeMapEntry **
findFreeSlot(const UA_NodeMap *ns, const UA_NodeId *nodeid) {
    UA_NodeMapEntry **retval = NULL;
    UA_UInt32 h = UA_NodeId_hash(nodeid);
    UA_UInt32 size = ns->size;
    UA_UInt64 idx = mod(h, size); // use 64 bit container to avoid overflow
    UA_UInt32 startIdx = (UA_UInt32)idx;
    UA_UInt32 hash2 = mod2(h, size);
    UA_NodeMapEntry *entry = NULL;
    do {
        entry = ns->entries[(UA_UInt32)idx];
        /* An occupied slot with a matching NodeId -> the id is taken */
        if(entry > UA_NODEMAP_TOMBSTONE &&
           UA_NodeId_equal(&entry->node.nodeId, nodeid))
            return NULL;
        /* Remember the first free slot (NULL or tombstone) but keep probing
         * until the chain ends, to rule out a duplicate NodeId further on */
        if(!retval && entry <= UA_NODEMAP_TOMBSTONE)
            retval = &ns->entries[(UA_UInt32)idx];
        idx += hash2;
        if(idx >= size)
            idx -= size;
    } while((UA_UInt32)idx != startIdx && entry);
    /* NULL is returned if there is no free slot (idx == startIdx).
     * Otherwise the first free slot is returned after we are sure,
     * that the node id cannot be found in the used hashmap (!entry). */
    return retval;
}
/* Resize the hash-map so that the occupancy of the table after the call will
 * be about 50%. Also used to downsize a very empty map (see clearSlot). */
static UA_StatusCode
expand(UA_NodeMap *ns) {
    UA_UInt32 osize = ns->size;
    UA_UInt32 count = ns->count;
    /* Resize only when table after removal of unused elements is either too
       full or too empty */
    if(count * 2 < osize && (count * 8 > osize || osize <= UA_NODEMAP_MINSIZE))
        return UA_STATUSCODE_GOOD;
    UA_NodeMapEntry **oentries = ns->entries;
    UA_UInt32 nindex = higher_prime_index(count * 2);
    UA_UInt32 nsize = primes[nindex];
    /* calloc zero-initializes, so every new slot starts out empty (NULL) */
    UA_NodeMapEntry **nentries = (UA_NodeMapEntry **)UA_calloc(nsize, sizeof(UA_NodeMapEntry*));
    if(!nentries)
        return UA_STATUSCODE_BADOUTOFMEMORY;
    ns->entries = nentries;
    ns->size = nsize;
    ns->sizePrimeIndex = nindex;
    /* recompute the position of every entry and insert the pointer;
     * j counts the moved entries so the loop can stop once all are placed */
    for(size_t i = 0, j = 0; i < osize && j < count; ++i) {
        if(oentries[i] <= UA_NODEMAP_TOMBSTONE)
            continue; /* skip empty slots and tombstones */
        UA_NodeMapEntry **e = findFreeSlot(ns, &oentries[i]->node.nodeId);
        UA_assert(e);
        *e = oentries[i];
        ++j;
    }
    UA_free(oentries);
    return UA_STATUSCODE_GOOD;
}
  135. static UA_NodeMapEntry *
  136. newEntry(UA_NodeClass nodeClass) {
  137. size_t size = sizeof(UA_NodeMapEntry) - sizeof(UA_Node);
  138. switch(nodeClass) {
  139. case UA_NODECLASS_OBJECT:
  140. size += sizeof(UA_ObjectNode);
  141. break;
  142. case UA_NODECLASS_VARIABLE:
  143. size += sizeof(UA_VariableNode);
  144. break;
  145. case UA_NODECLASS_METHOD:
  146. size += sizeof(UA_MethodNode);
  147. break;
  148. case UA_NODECLASS_OBJECTTYPE:
  149. size += sizeof(UA_ObjectTypeNode);
  150. break;
  151. case UA_NODECLASS_VARIABLETYPE:
  152. size += sizeof(UA_VariableTypeNode);
  153. break;
  154. case UA_NODECLASS_REFERENCETYPE:
  155. size += sizeof(UA_ReferenceTypeNode);
  156. break;
  157. case UA_NODECLASS_DATATYPE:
  158. size += sizeof(UA_DataTypeNode);
  159. break;
  160. case UA_NODECLASS_VIEW:
  161. size += sizeof(UA_ViewNode);
  162. break;
  163. default:
  164. return NULL;
  165. }
  166. UA_NodeMapEntry *entry = (UA_NodeMapEntry*)UA_calloc(1, size);
  167. if(!entry)
  168. return NULL;
  169. entry->node.nodeClass = nodeClass;
  170. return entry;
  171. }
  172. static void
  173. deleteEntry(UA_NodeMapEntry *entry) {
  174. UA_Node_deleteMembers(&entry->node);
  175. UA_free(entry);
  176. }
  177. static void
  178. cleanupEntry(UA_NodeMapEntry *entry) {
  179. if(entry->deleted && entry->refCount == 0)
  180. deleteEntry(entry);
  181. }
/* Remove the entry in the given slot from the map. The slot becomes a
 * tombstone so that probe chains running across it keep working. The entry
 * itself is freed only once no consumer holds a reference anymore. */
static UA_StatusCode
clearSlot(UA_NodeMap *ns, UA_NodeMapEntry **slot) {
    UA_NodeMapEntry *entry = *slot;
    /* Atomic swap so a concurrent reader never observes a half-cleared slot */
    if(UA_atomic_cmpxchg((void**)slot, entry, UA_NODEMAP_TOMBSTONE) != entry)
        return UA_STATUSCODE_BADINTERNALERROR;
    entry->deleted = true;
    cleanupEntry(entry); /* frees immediately only if refCount == 0 */
    --ns->count;
    /* Downsize the hashmap if it is very empty */
    if(ns->count * 8 < ns->size && ns->size > 32)
        expand(ns); /* Can fail. Just continue with the bigger hashmap. */
    return UA_STATUSCODE_GOOD;
}
/* Find the slot holding the entry with the given NodeId. Probes with double
 * hashing (initial position from mod(), step width from mod2()), skipping
 * tombstones. Returns NULL if the NodeId is not present. */
static UA_NodeMapEntry **
findOccupiedSlot(const UA_NodeMap *ns, const UA_NodeId *nodeid) {
    UA_UInt32 h = UA_NodeId_hash(nodeid);
    UA_UInt32 size = ns->size;
    UA_UInt64 idx = mod(h, size); // use 64 bit container to avoid overflow
    UA_UInt32 hash2 = mod2(h, size);
    UA_UInt32 startIdx = (UA_UInt32)idx;
    UA_NodeMapEntry *entry = NULL;
    do {
        entry = ns->entries[(UA_UInt32)idx];
        /* Compare only live entries; skip NULL (0x0) and tombstones (0x1) */
        if(entry > UA_NODEMAP_TOMBSTONE &&
           UA_NodeId_equal(&entry->node.nodeId, nodeid))
            return &ns->entries[(UA_UInt32)idx];
        idx += hash2;
        if(idx >= size)
            idx -= size;
    } while((UA_UInt32)idx != startIdx && entry);
    /* NULL is returned if there is no free slot (idx == startIdx)
     * and the node id is not found or if the end of the used slots (!entry)
     * is reached. */
    return NULL;
}
  217. /***********************/
  218. /* Interface functions */
  219. /***********************/
  220. static UA_Node *
  221. UA_NodeMap_newNode(void *context, UA_NodeClass nodeClass) {
  222. UA_NodeMapEntry *entry = newEntry(nodeClass);
  223. if(!entry)
  224. return NULL;
  225. return &entry->node;
  226. }
  227. static void
  228. UA_NodeMap_deleteNode(void *context, UA_Node *node) {
  229. #ifdef UA_ENABLE_MULTITHREADING
  230. UA_NodeMap *ns = (UA_NodeMap*)context;
  231. #endif
  232. BEGIN_CRITSECT(ns);
  233. UA_NodeMapEntry *entry = container_of(node, UA_NodeMapEntry, node);
  234. UA_assert(&entry->node == node);
  235. deleteEntry(entry);
  236. END_CRITSECT(ns);
  237. }
  238. static const UA_Node *
  239. UA_NodeMap_getNode(void *context, const UA_NodeId *nodeid) {
  240. UA_NodeMap *ns = (UA_NodeMap*)context;
  241. BEGIN_CRITSECT(ns);
  242. UA_NodeMapEntry **entry = findOccupiedSlot(ns, nodeid);
  243. if(!entry) {
  244. END_CRITSECT(ns);
  245. return NULL;
  246. }
  247. ++(*entry)->refCount;
  248. END_CRITSECT(ns);
  249. return (const UA_Node*)&(*entry)->node;
  250. }
  251. static void
  252. UA_NodeMap_releaseNode(void *context, const UA_Node *node) {
  253. if (!node)
  254. return;
  255. #ifdef UA_ENABLE_MULTITHREADING
  256. UA_NodeMap *ns = (UA_NodeMap*)context;
  257. #endif
  258. BEGIN_CRITSECT(ns);
  259. UA_NodeMapEntry *entry = container_of(node, UA_NodeMapEntry, node);
  260. UA_assert(&entry->node == node);
  261. UA_assert(entry->refCount > 0);
  262. --entry->refCount;
  263. cleanupEntry(entry);
  264. END_CRITSECT(ns);
  265. }
  266. static UA_StatusCode
  267. UA_NodeMap_getNodeCopy(void *context, const UA_NodeId *nodeid,
  268. UA_Node **outNode) {
  269. UA_NodeMap *ns = (UA_NodeMap*)context;
  270. BEGIN_CRITSECT(ns);
  271. UA_NodeMapEntry **slot = findOccupiedSlot(ns, nodeid);
  272. if(!slot) {
  273. END_CRITSECT(ns);
  274. return UA_STATUSCODE_BADNODEIDUNKNOWN;
  275. }
  276. UA_NodeMapEntry *entry = *slot;
  277. UA_NodeMapEntry *newItem = newEntry(entry->node.nodeClass);
  278. if(!newItem) {
  279. END_CRITSECT(ns);
  280. return UA_STATUSCODE_BADOUTOFMEMORY;
  281. }
  282. UA_StatusCode retval = UA_Node_copy(&entry->node, &newItem->node);
  283. if(retval == UA_STATUSCODE_GOOD) {
  284. newItem->orig = entry; // store the pointer to the original
  285. *outNode = &newItem->node;
  286. } else {
  287. deleteEntry(newItem);
  288. }
  289. END_CRITSECT(ns);
  290. return retval;
  291. }
  292. static UA_StatusCode
  293. UA_NodeMap_removeNode(void *context, const UA_NodeId *nodeid) {
  294. UA_NodeMap *ns = (UA_NodeMap*)context;
  295. BEGIN_CRITSECT(ns);
  296. UA_NodeMapEntry **slot = findOccupiedSlot(ns, nodeid);
  297. UA_StatusCode retval = UA_STATUSCODE_GOOD;
  298. if(slot)
  299. retval = clearSlot(ns, slot);
  300. else
  301. retval = UA_STATUSCODE_BADNODEIDUNKNOWN;
  302. END_CRITSECT(ns);
  303. return retval;
  304. }
/* Insert a node (obtained from newNode/getNodeCopy) into the map. Ownership
 * of the node passes to this function: on any failure path the entry is
 * freed. If the NodeId is numeric zero, a fresh numeric id is generated. If
 * addedNodeId is non-NULL, the final NodeId is copied into it. */
static UA_StatusCode
UA_NodeMap_insertNode(void *context, UA_Node *node,
                      UA_NodeId *addedNodeId) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    /* Grow the table when occupancy reaches 75% */
    if(ns->size * 3 <= ns->count * 4) {
        if(expand(ns) != UA_STATUSCODE_GOOD) {
            END_CRITSECT(ns);
            return UA_STATUSCODE_BADINTERNALERROR;
        }
    }
    UA_NodeMapEntry **slot;
    if(node->nodeId.identifierType == UA_NODEIDTYPE_NUMERIC &&
       node->nodeId.identifier.numeric == 0) {
        /* create a random nodeid */
        /* start at least with 50,000 to make sure we don not conflict with nodes from the spec */
        /* if we find a conflict, we just try another identifier until we have tried all possible identifiers */
        /* since the size is prime and we don't change the increase val, we will reach the starting id again */
        /* E.g. adding a nodeset will create children while there are still other nodes which need to be created */
        /* Thus the node ids may collide */
        UA_UInt32 size = ns->size;
        UA_UInt64 identifier = mod(50000 + size+1, UA_UINT32_MAX); // start value, use 64 bit container to avoid overflow
        UA_UInt32 increase = mod2(ns->count+1, size);
        UA_UInt32 startId = (UA_UInt32)identifier; // mod ensures us that the id is a valid 32 bit
        do {
            node->nodeId.identifier.numeric = (UA_UInt32)identifier;
            slot = findFreeSlot(ns, &node->nodeId);
            if(slot)
                break;
            identifier += increase;
            if(identifier >= size)
                identifier -= size;
        } while((UA_UInt32)identifier != startId);
    } else {
        slot = findFreeSlot(ns, &node->nodeId);
    }
    /* findFreeSlot returns NULL both when the id already exists and when no
     * empty slot was found; either way the node cannot be inserted */
    if(!slot) {
        deleteEntry(container_of(node, UA_NodeMapEntry, node));
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDEXISTS;
    }
    /* Copy the NodeId */
    UA_StatusCode retval = UA_STATUSCODE_GOOD;
    if(addedNodeId) {
        retval = UA_NodeId_copy(&node->nodeId, addedNodeId);
        if(retval != UA_STATUSCODE_GOOD) {
            deleteEntry(container_of(node, UA_NodeMapEntry, node));
            END_CRITSECT(ns);
            return retval;
        }
    }
    /* Insert the node. The atomic exchange makes the entry visible to
     * concurrent readers in a single step; it fails if the slot was filled
     * in the meantime. */
    UA_NodeMapEntry *oldEntry = *slot;
    UA_NodeMapEntry *newEntry = container_of(node, UA_NodeMapEntry, node);
    if(oldEntry > UA_NODEMAP_TOMBSTONE ||
       UA_atomic_cmpxchg((void**)slot, oldEntry,
                         newEntry) != oldEntry) {
        deleteEntry(container_of(node, UA_NodeMapEntry, node));
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDEXISTS;
    }
    ++ns->count;
    END_CRITSECT(ns);
    return retval;
}
/* Replace a node in the map with the updated copy obtained via getNodeCopy.
 * Optimistic concurrency: the replace fails if the map entry was exchanged
 * since the copy was made (detected via the copy's ->orig pointer). The new
 * node is consumed on both success and failure. */
static UA_StatusCode
UA_NodeMap_replaceNode(void *context, UA_Node *node) {
    UA_NodeMap *ns = (UA_NodeMap*)context;
    BEGIN_CRITSECT(ns);
    /* Find the node */
    UA_NodeMapEntry **slot = findOccupiedSlot(ns, &node->nodeId);
    if(!slot) {
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADNODEIDUNKNOWN;
    }
    UA_NodeMapEntry *newEntryContainer = container_of(node, UA_NodeMapEntry, node);
    UA_NodeMapEntry *oldEntryContainer = *slot;
    /* The node was already updated since the copy was made? */
    if(oldEntryContainer != newEntryContainer->orig) {
        deleteEntry(newEntryContainer);
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* Replace the entry with an atomic operation */
    if(UA_atomic_cmpxchg((void**)slot, oldEntryContainer,
                         newEntryContainer) != oldEntryContainer) {
        deleteEntry(newEntryContainer);
        END_CRITSECT(ns);
        return UA_STATUSCODE_BADINTERNALERROR;
    }
    /* Mark the old version for deletion; freed once all readers released it */
    oldEntryContainer->deleted = true;
    cleanupEntry(oldEntryContainer);
    END_CRITSECT(ns);
    return UA_STATUSCODE_GOOD;
}
  400. static void
  401. UA_NodeMap_iterate(void *context, void *visitorContext,
  402. UA_NodestoreVisitor visitor) {
  403. UA_NodeMap *ns = (UA_NodeMap*)context;
  404. BEGIN_CRITSECT(ns);
  405. for(UA_UInt32 i = 0; i < ns->size; ++i) {
  406. if(ns->entries[i] > UA_NODEMAP_TOMBSTONE) {
  407. END_CRITSECT(ns);
  408. UA_NodeMapEntry *entry = ns->entries[i];
  409. entry->refCount++;
  410. visitor(visitorContext, &entry->node);
  411. entry->refCount--;
  412. cleanupEntry(entry);
  413. BEGIN_CRITSECT(ns);
  414. }
  415. }
  416. END_CRITSECT(ns);
  417. }
  418. static void
  419. UA_NodeMap_delete(void *context) {
  420. UA_NodeMap *ns = (UA_NodeMap*)context;
  421. #ifdef UA_ENABLE_MULTITHREADING
  422. pthread_mutex_destroy(&ns->mutex);
  423. #endif
  424. UA_UInt32 size = ns->size;
  425. UA_NodeMapEntry **entries = ns->entries;
  426. for(UA_UInt32 i = 0; i < size; ++i) {
  427. if(entries[i] > UA_NODEMAP_TOMBSTONE) {
  428. /* On debugging builds, check that all nodes were release */
  429. UA_assert(entries[i]->refCount == 0);
  430. /* Delete the node */
  431. deleteEntry(entries[i]);
  432. }
  433. }
  434. UA_free(ns->entries);
  435. UA_free(ns);
  436. }
  437. UA_StatusCode
  438. UA_Nodestore_default_new(UA_Nodestore *ns) {
  439. /* Allocate and initialize the nodemap */
  440. UA_NodeMap *nodemap = (UA_NodeMap*)UA_malloc(sizeof(UA_NodeMap));
  441. if(!nodemap)
  442. return UA_STATUSCODE_BADOUTOFMEMORY;
  443. nodemap->sizePrimeIndex = higher_prime_index(UA_NODEMAP_MINSIZE);
  444. nodemap->size = primes[nodemap->sizePrimeIndex];
  445. nodemap->count = 0;
  446. nodemap->entries = (UA_NodeMapEntry**)
  447. UA_calloc(nodemap->size, sizeof(UA_NodeMapEntry*));
  448. if(!nodemap->entries) {
  449. UA_free(nodemap);
  450. return UA_STATUSCODE_BADOUTOFMEMORY;
  451. }
  452. #ifdef UA_ENABLE_MULTITHREADING
  453. pthread_mutex_init(&nodemap->mutex, NULL);
  454. #endif
  455. /* Populate the nodestore */
  456. ns->context = nodemap;
  457. ns->deleteNodestore = UA_NodeMap_delete;
  458. ns->inPlaceEditAllowed = true;
  459. ns->newNode = UA_NodeMap_newNode;
  460. ns->deleteNode = UA_NodeMap_deleteNode;
  461. ns->getNode = UA_NodeMap_getNode;
  462. ns->releaseNode = UA_NodeMap_releaseNode;
  463. ns->getNodeCopy = UA_NodeMap_getNodeCopy;
  464. ns->insertNode = UA_NodeMap_insertNode;
  465. ns->replaceNode = UA_NodeMap_replaceNode;
  466. ns->removeNode = UA_NodeMap_removeNode;
  467. ns->iterate = UA_NodeMap_iterate;
  468. return UA_STATUSCODE_GOOD;
  469. }