ua_namespace.c

#include "ua_namespace.h"
#include <string.h>
#include <stdio.h>

/*****************************************/
/* Internal (not exported) functionality */
/*****************************************/

UA_Int32 Namespace_TransactionContext_init(Namespace_TransactionContext * tc) {
    return UA_list_init((UA_list_List *) tc);
}

/* The central data structure is a hash-map of UA_Node objects. Entry lookup via Algorithm D from
   Knuth's TAOCP (no linked lists here). The table of primes and the mod-functions are from
   libiberty (licensed under the LGPL). */

typedef UA_UInt32 hash_t;

struct prime_ent {
    hash_t prime;
    hash_t inv;
    hash_t inv_m2; /* inverse of prime-2 */
    hash_t shift;
};

static struct prime_ent const prime_tab[] = {
    {7, 0x24924925, 0x9999999b, 2},
    {13, 0x3b13b13c, 0x745d1747, 3},
    {31, 0x08421085, 0x1a7b9612, 4},
    {61, 0x0c9714fc, 0x15b1e5f8, 5},
    {127, 0x02040811, 0x0624dd30, 6},
    {251, 0x05197f7e, 0x073260a5, 7},
    {509, 0x01824366, 0x02864fc8, 8},
    {1021, 0x00c0906d, 0x014191f7, 9},
    {2039, 0x0121456f, 0x0161e69e, 10},
    {4093, 0x00300902, 0x00501908, 11},
    {8191, 0x00080041, 0x00180241, 12},
    {16381, 0x000c0091, 0x00140191, 13},
    {32749, 0x002605a5, 0x002a06e6, 14},
    {65521, 0x000f00e2, 0x00110122, 15},
    {131071, 0x00008001, 0x00018003, 16},
    {262139, 0x00014002, 0x0001c004, 17},
    {524287, 0x00002001, 0x00006001, 18},
    {1048573, 0x00003001, 0x00005001, 19},
    {2097143, 0x00004801, 0x00005801, 20},
    {4194301, 0x00000c01, 0x00001401, 21},
    {8388593, 0x00001e01, 0x00002201, 22},
    {16777213, 0x00000301, 0x00000501, 23},
    {33554393, 0x00001381, 0x00001481, 24},
    {67108859, 0x00000141, 0x000001c1, 25},
    {134217689, 0x000004e1, 0x00000521, 26},
    {268435399, 0x00000391, 0x000003b1, 27},
    {536870909, 0x00000019, 0x00000029, 28},
    {1073741789, 0x0000008d, 0x00000095, 29},
    {2147483647, 0x00000003, 0x00000007, 30},
    /* Avoid "decimal constant so large it is unsigned" for 4294967291. */
    {0xfffffffb, 0x00000006, 0x00000008, 31}
};
/* Hashing inspired by code from http://www.azillionmonkeys.com/qed/hash.html, licensed under
   the LGPL 2.1 */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) || \
    defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((UA_UInt32)(((const uint8_t *)(d))[1])) << 8) + \
                      (UA_UInt32)(((const uint8_t *)(d))[0]) )
#endif

static inline hash_t hash_string(const UA_Byte * data, UA_Int32 len) {
    hash_t hash = len;
    hash_t tmp;
    int rem;

    if(len <= 0 || data == UA_NULL)
        return 0;

    rem = len & 3;
    len >>= 2;

    /* Main loop */
    for(; len > 0; len--) {
        hash += get16bits(data);
        tmp = (get16bits(data + 2) << 11) ^ hash;
        hash = (hash << 16) ^ tmp;
        data += 2 * sizeof(uint16_t);
        hash += hash >> 11;
    }

    /* Handle end cases */
    switch (rem) {
    case 3:
        hash += get16bits(data);
        hash ^= hash << 16;
        hash ^= ((signed char)data[sizeof(uint16_t)]) << 18;
        hash += hash >> 11;
        break;
    case 2:
        hash += get16bits(data);
        hash ^= hash << 11;
        hash += hash >> 17;
        break;
    case 1:
        hash += (signed char)*data;
        hash ^= hash << 10;
        hash += hash >> 1;
        break;
    }

    /* Force "avalanching" of final 127 bits */
    hash ^= hash << 3;
    hash += hash >> 5;
    hash ^= hash << 4;
    hash += hash >> 17;
    hash ^= hash << 25;
    hash += hash >> 6;

    return hash;
}
static inline hash_t hash(const UA_NodeId * n) {
    switch (n->encodingByte) {
    case UA_NODEIDTYPE_TWOBYTE:
    case UA_NODEIDTYPE_FOURBYTE:
    case UA_NODEIDTYPE_NUMERIC:
        /* Knuth's multiplicative hashing. Unsigned 32-bit multiplication wraps around, so the
           result is already reduced modulo 2^32. */
        return n->identifier.numeric * 2654435761U;
    case UA_NODEIDTYPE_STRING:
        return hash_string(n->identifier.string.data, n->identifier.string.length);
    case UA_NODEIDTYPE_GUID:
        return hash_string((UA_Byte *) & (n->identifier.guid), sizeof(UA_Guid));
    case UA_NODEIDTYPE_BYTESTRING:
        return hash_string((UA_Byte *) n->identifier.byteString.data, n->identifier.byteString.length);
    default:
        return 0;
    }
}
/* Returns an index into the above table of the nearest prime number which is greater than or
   equal to N, and near a power of two. */
static inline unsigned int higher_prime_index(unsigned long n) {
    unsigned int low = 0;
    unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]);

    while(low != high) {
        unsigned int mid = low + (high - low) / 2;
        if(n > prime_tab[mid].prime)
            low = mid + 1;
        else
            high = mid;
    }
    return low;
}
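
/* For example, higher_prime_index(100) returns 4, the index of 127, since 127 is the first
   entry in prime_tab that is not smaller than 100. */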

static inline hash_t mod_1(hash_t x, hash_t y, hash_t inv, int shift) {
    /* The multiplicative inverses computed above are for 32-bit types and require that we are
       able to compute a highpart multiply. */
#ifdef UNSIGNED_64BIT_TYPE
    __extension__ typedef UNSIGNED_64BIT_TYPE ull;
    if(sizeof(hash_t) * CHAR_BIT <= 32) {
        hash_t t1, t2, t3, t4, q, r;
        t1 = ((ull) x * inv) >> 32;
        t2 = x - t1;
        t3 = t2 >> 1;
        t4 = t1 + t3;
        q  = t4 >> shift;
        r  = x - (q * y);
        return r;
    }
#endif
    return x % y;
}
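
/* Worked example with the first table entry (prime 7, inv 0x24924925, shift 2):
   mod_1(100, 7, 0x24924925, 2) gives t1 = (100 * 0x24924925) >> 32 = 14, t2 = 86, t3 = 43,
   t4 = 57, q = 57 >> 2 = 14 and r = 100 - 14*7 = 2, which matches 100 % 7. */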

static inline hash_t mod(hash_t h, const Namespace * ns) {
    const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
    return mod_1(h, p->prime, p->inv, p->shift);
}

static inline hash_t mod_m2(hash_t h, const Namespace * ns) {
    const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
    return 1 + mod_1(h, p->prime - 2, p->inv_m2, p->shift);
}
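
/* mod() yields the initial slot for open addressing; mod_m2() yields the probe step used by
   find_slot and find_empty_slot below. The step lies in [1, prime-2] and the table size is
   prime, so the step is coprime to the size and the probe sequence visits every slot. */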

static inline void clear_slot(Namespace * ns, Namespace_Entry * slot) {
    if(slot->node == UA_NULL)
        return;

#ifdef MULTITHREADING
    pthread_rwlock_wrlock((pthread_rwlock_t *) slot->lock); /* Get write lock. */
#endif
    switch (slot->node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        UA_ObjectNode_delete((UA_ObjectNode *) slot->node);
        break;
    case UA_NODECLASS_VARIABLE:
        UA_VariableNode_delete((UA_VariableNode *) slot->node);
        break;
    case UA_NODECLASS_METHOD:
        UA_MethodNode_delete((UA_MethodNode *) slot->node);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        UA_ObjectTypeNode_delete((UA_ObjectTypeNode *) slot->node);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        UA_VariableTypeNode_delete((UA_VariableTypeNode *) slot->node);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        UA_ReferenceTypeNode_delete((UA_ReferenceTypeNode *) slot->node);
        break;
    case UA_NODECLASS_DATATYPE:
        UA_DataTypeNode_delete((UA_DataTypeNode *) slot->node);
        break;
    case UA_NODECLASS_VIEW:
        UA_ViewNode_delete((UA_ViewNode *) slot->node);
        break;
    default:
        break;
    }
    slot->node = UA_NULL;
#ifdef MULTITHREADING
    pthread_rwlock_destroy((pthread_rwlock_t *) slot->lock);
    UA_free(slot->lock);
#endif
}

static inline UA_Int32 find_slot(const Namespace * ns, Namespace_Entry ** slot, const UA_NodeId * nodeid) {
    hash_t h = hash(nodeid);
    hash_t index, hash2;
    UA_UInt32 size;
    Namespace_Entry *entry;

    size = ns->size;
    index = mod(h, ns);
    entry = &ns->entries[index];
    if(entry->node == UA_NULL)
        return UA_ERROR; /* Empty slot reached: the nodeid is not in the table. */
    if(UA_NodeId_compare(&entry->node->nodeId, nodeid) == UA_EQUAL) {
        *slot = entry;
        return UA_SUCCESS;
    }

    hash2 = mod_m2(h, ns);
    for(;;) {
        index += hash2;
        if(index >= size)
            index -= size;
        entry = &ns->entries[index];
        if(entry->node == UA_NULL)
            return UA_ERROR; /* Empty slot reached: the nodeid is not in the table. */
        if(UA_NodeId_compare(&entry->node->nodeId, nodeid) == UA_EQUAL) {
            *slot = entry;
            return UA_SUCCESS;
        }
    }
    return UA_SUCCESS;
}

/* Returns an empty slot. One is always found as long as the table is not completely full. */
static Namespace_Entry *find_empty_slot(const Namespace * ns, hash_t h) {
    hash_t index = mod(h, ns);
    UA_UInt32 size = ns->size;
    Namespace_Entry *slot = &ns->entries[index];
    if(slot->node == UA_NULL)
        return slot;

    hash_t hash2 = mod_m2(h, ns);
    for(;;) {
        index += hash2;
        if(index >= size)
            index -= size;
        slot = &ns->entries[index];
        if(slot->node == UA_NULL)
            return slot;
    }
    return UA_NULL;
}

/* Resizes the array of entries and reinserts the existing elements. The occupancy of the table
   after the call is at most about 50%. The table must already exist. Note that the entries are
   moved, so pointers into the old array become invalid. Returns UA_SUCCESS, or UA_ERR_NO_MEMORY
   if the new array could not be allocated (the old table is left untouched in that case). */
static UA_Int32 expand(Namespace * ns) {
    Namespace_Entry *nentries;
    int32_t nsize;
    UA_UInt32 nindex;

    Namespace_Entry *oentries = ns->entries;
    int32_t osize = ns->size;
    Namespace_Entry *olimit = &oentries[osize];
    int32_t count = ns->count;

    /* Resize only when the table is either too full or too empty. */
    if(count * 2 < osize && (count * 8 > osize || osize <= 32)) {
        return UA_SUCCESS;
    }

    nindex = higher_prime_index(count * 2);
    nsize = prime_tab[nindex].prime;

    if(UA_alloc((void **)&nentries, sizeof(Namespace_Entry) * nsize) != UA_SUCCESS)
        return UA_ERR_NO_MEMORY;
    /* Zero the new entries so that empty slots are recognizable (node == UA_NULL), mirroring
       the initialization in Namespace_create. */
    memset(nentries, 0, sizeof(Namespace_Entry) * nsize);
    ns->entries = nentries;
    ns->size = nsize;
    ns->sizePrimeIndex = nindex;

    Namespace_Entry *p = oentries;
    do {
        if(p->node != UA_NULL) {
            Namespace_Entry *q = find_empty_slot(ns, hash(&p->node->nodeId));
            *q = *p;
        }
        p++;
    } while(p < olimit);

    UA_free(oentries);
    return UA_SUCCESS;
}

/**********************/
/* Exported functions */
/**********************/

UA_Int32 Namespace_create(Namespace ** result, UA_UInt32 size) {
    Namespace *ns = UA_NULL;
    UA_UInt32 sizePrimeIndex = higher_prime_index(size);
    size = prime_tab[sizePrimeIndex].prime;

    if(UA_alloc((void **)&ns, sizeof(Namespace)) != UA_SUCCESS)
        return UA_ERR_NO_MEMORY;
    if(UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * size) != UA_SUCCESS) {
        UA_free(ns);
        return UA_ERR_NO_MEMORY;
    }

    /* Set the entries to zero: ns->entries[i].lock = UA_NULL and ns->entries[i].node = UA_NULL. */
    memset(ns->entries, 0, size * sizeof(Namespace_Entry));

    ns->size = size;
    ns->count = 0;
    ns->sizePrimeIndex = sizePrimeIndex;
    *result = ns;
    return UA_SUCCESS;
}

static void Namespace_clear(Namespace * ns) {
    UA_UInt32 size = ns->size;
    Namespace_Entry *entries = ns->entries;
    for(UA_UInt32 i = 0; i < size; i++)
        clear_slot(ns, &entries[i]);
    ns->count = 0;
}

void Namespace_empty(Namespace * ns) {
    Namespace_clear(ns);

    /* Downsize the table. */
    if(ns->size > 1024 * 1024 / sizeof(Namespace_Entry)) {
        int nindex = higher_prime_index(1024 / sizeof(Namespace_Entry));
        int nsize = prime_tab[nindex].prime;
        UA_free(ns->entries);
        UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * nsize); // FIXME: Check return value
        memset(ns->entries, 0, sizeof(Namespace_Entry) * nsize); /* mark all slots as empty */
        ns->size = nsize;
        ns->sizePrimeIndex = nindex;
    }
}

void Namespace_delete(Namespace * ns) {
    Namespace_clear(ns);
    UA_free(ns->entries);
    UA_free(ns);
}

UA_Int32 Namespace_insert(Namespace * ns, UA_Node * node) {
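    /* Grow the table when it is at least 75% full (size * 3 <= count * 4). expand() chooses a
       new prime size of at least count * 2, bringing the load factor back to at most about 50%. */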
    if(ns->size * 3 <= ns->count * 4) {
        if(expand(ns) != UA_SUCCESS)
            return UA_ERROR;
    }

    hash_t h = hash(&node->nodeId);
    Namespace_Entry *slot = find_empty_slot(ns, h);
#ifdef MULTITHREADING
    if(UA_alloc((void **)&slot->lock, sizeof(pthread_rwlock_t)) != UA_SUCCESS)
        return UA_ERR_NO_MEMORY;
    pthread_rwlock_init((pthread_rwlock_t *) slot->lock, NULL);
#endif
    slot->node = node;
    ns->count++;
    return UA_SUCCESS;
}

UA_Int32 Namespace_get(Namespace const *ns, const UA_NodeId * nodeid, UA_Node const **result, Namespace_Lock ** lock) {
    Namespace_Entry *slot;
    if(find_slot(ns, &slot, nodeid) != UA_SUCCESS)
        return UA_ERROR;
#ifdef MULTITHREADING
    if(pthread_rwlock_rdlock((pthread_rwlock_t *) slot->lock) != 0)
        return UA_ERROR;
    *lock = slot->lock;
#endif
    *result = slot->node;
    return UA_SUCCESS;
}

UA_Int32 Namespace_getWritable(const Namespace * ns, const UA_NodeId * nodeid, UA_Node ** result, Namespace_Lock ** lock) {
    Namespace_Entry *slot;
    if(find_slot(ns, &slot, nodeid) != UA_SUCCESS)
        return UA_ERROR;
#ifdef MULTITHREADING
    if(pthread_rwlock_wrlock((pthread_rwlock_t *) slot->lock) != 0)
        return UA_ERROR;
    *lock = slot->lock;
#endif
    *result = slot->node;
    return UA_SUCCESS;
}

#ifdef MULTITHREADING
static inline void release_context_walker(void *lock) {
    pthread_rwlock_unlock(lock);
}
#endif

UA_Int32 Namespace_transactionGet(Namespace * ns, Namespace_TransactionContext * tc, const UA_NodeId * nodeid, UA_Node ** const result, Namespace_Lock ** lock) {
    Namespace_Entry *slot;
    if(find_slot(ns, &slot, nodeid) != UA_SUCCESS)
        return UA_ERROR;
#ifdef MULTITHREADING
    if(pthread_rwlock_tryrdlock((pthread_rwlock_t *) slot->lock) != 0) {
        /* Transaction failed. Release all acquired locks and bail out. */
        UA_list_destroy((UA_list_List *) tc, release_context_walker);
        return UA_ERROR;
    }
    UA_list_addPayloadToBack((UA_list_List *) tc, slot->lock);
    *lock = slot->lock;
#endif
    *result = slot->node;
    return UA_SUCCESS;
}

UA_Int32 Namespace_transactionGetWritable(Namespace * ns, Namespace_TransactionContext * tc, const UA_NodeId * nodeid, UA_Node ** result, Namespace_Lock ** lock) {
    Namespace_Entry *slot;
    if(find_slot(ns, &slot, nodeid) != UA_SUCCESS)
        return UA_ERROR;
#ifdef MULTITHREADING
    if(pthread_rwlock_trywrlock((pthread_rwlock_t *) slot->lock) != 0) {
        /* Transaction failed. Release all acquired locks and bail out. */
        UA_list_destroy((UA_list_List *) tc, release_context_walker);
        return UA_ERROR;
    }
    UA_list_addPayloadToBack((UA_list_List *) tc, slot->lock);
    *lock = slot->lock;
#endif
    *result = slot->node;
    return UA_SUCCESS;
}

void Namespace_remove(Namespace * ns, UA_NodeId * nodeid) {
    Namespace_Entry *slot;
    if(find_slot(ns, &slot, nodeid) != UA_SUCCESS)
        return;
    // TODO: Check if deleting the node makes the Namespace inconsistent.
    clear_slot(ns, slot);
    ns->count--;

    /* Downsize the hashmap if it is very empty */
    if(ns->count * 8 < ns->size && ns->size > 32)
        expand(ns);
}

UA_Int32 Namespace_iterate(const Namespace * ns, Namespace_nodeVisitor visitor) {
    UA_UInt32 i;
    for(i = 0; i < ns->size; i++) {
        Namespace_Entry *entry = &ns->entries[i];
        if(entry->node != UA_NULL && visitor != UA_NULL)
            visitor(entry->node);
    }
    return UA_SUCCESS;
}
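
/* Example usage (a minimal sketch, not part of the implementation). It assumes a previously
   constructed UA_Node "node" whose nodeId is already set; node construction is outside the
   scope of this file. With MULTITHREADING enabled, the lock returned by Namespace_get must be
   released by the caller once the node is no longer accessed.

       Namespace *ns;
       if(Namespace_create(&ns, 100) != UA_SUCCESS)
           return UA_ERROR;
       Namespace_insert(ns, node);

       UA_Node const *found;
       Namespace_Lock *lock = UA_NULL;
       if(Namespace_get(ns, &node->nodeId, &found, &lock) == UA_SUCCESS) {
           // ... read from found, then release the lock if MULTITHREADING is enabled ...
       }
       Namespace_delete(ns);
*/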