/* ua_namespace.c -- hash-map based node store (namespace) implementation */
  1. #include "ua_namespace.h"
  2. #include <string.h>
  3. #include <stdio.h>
  4. /*****************************************/
  5. /* Internal (not exported) functionality */
  6. /*****************************************/
  7. struct Namespace_Entry {
  8. UA_UInt64 status; /* 2 bits status | 14 bits checkout count | 48 bits timestamp */
  9. const UA_Node *node; /* Nodes are immutable. It is not recommended to change nodes in place */
  10. };
  11. struct Namespace_Entry_Lock {
  12. Namespace_Entry *entry;
  13. };
  14. /* The tombstone (entry.node == 0x01) indicates that an entry was deleted at the position in the
  15. hash-map. This is information is used to decide whether the entire table shall be rehashed so
  16. that entries are found faster. */
  17. #define ENTRY_EMPTY UA_NULL
  18. #define ENTRY_TOMBSTONE 0x01
/* The central data structure is a hash-map of UA_Node objects. Entry lookup via Algorithm D from
   Knuth's TAOCP (open addressing with double hashing, no linked lists here). Table of primes and
   mod-functions are from libiberty (licensed under LGPL) */
typedef UA_UInt32 hash_t;

/* One row of the prime table: a prime table size plus precomputed constants for
   computing "x mod prime" by reciprocal multiplication instead of division (see mod_1). */
struct prime_ent {
	hash_t prime;
	hash_t inv;    /* multiplicative inverse used to divide by prime */
	hash_t inv_m2; /* inverse of prime-2 (secondary probe stride) */
	hash_t shift;  /* final right shift of the reciprocal multiply */
};
/* Primes near successive powers of two, each with its precomputed reciprocal
   constants (table layout and values taken from libiberty's hashtab.c). */
static struct prime_ent const prime_tab[] = {
	{7, 0x24924925, 0x9999999b, 2},
	{13, 0x3b13b13c, 0x745d1747, 3},
	{31, 0x08421085, 0x1a7b9612, 4},
	{61, 0x0c9714fc, 0x15b1e5f8, 5},
	{127, 0x02040811, 0x0624dd30, 6},
	{251, 0x05197f7e, 0x073260a5, 7},
	{509, 0x01824366, 0x02864fc8, 8},
	{1021, 0x00c0906d, 0x014191f7, 9},
	{2039, 0x0121456f, 0x0161e69e, 10},
	{4093, 0x00300902, 0x00501908, 11},
	{8191, 0x00080041, 0x00180241, 12},
	{16381, 0x000c0091, 0x00140191, 13},
	{32749, 0x002605a5, 0x002a06e6, 14},
	{65521, 0x000f00e2, 0x00110122, 15},
	{131071, 0x00008001, 0x00018003, 16},
	{262139, 0x00014002, 0x0001c004, 17},
	{524287, 0x00002001, 0x00006001, 18},
	{1048573, 0x00003001, 0x00005001, 19},
	{2097143, 0x00004801, 0x00005801, 20},
	{4194301, 0x00000c01, 0x00001401, 21},
	{8388593, 0x00001e01, 0x00002201, 22},
	{16777213, 0x00000301, 0x00000501, 23},
	{33554393, 0x00001381, 0x00001481, 24},
	{67108859, 0x00000141, 0x000001c1, 25},
	{134217689, 0x000004e1, 0x00000521, 26},
	{268435399, 0x00000391, 0x000003b1, 27},
	{536870909, 0x00000019, 0x00000029, 28},
	{1073741789, 0x0000008d, 0x00000095, 29},
	{2147483647, 0x00000003, 0x00000007, 30},
	/* Avoid "decimal constant so large it is unsigned" for 4294967291. */
	{0xfffffffb, 0x00000006, 0x00000008, 31}
};
/* Hashing inspired by code from http://www.azillionmonkeys.com/qed/hash.html, licensed under
   the LGPL 2.1 */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) || \
	defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
/* Fast path: direct 16-bit load through a cast byte pointer.
   NOTE(review): this relies on the listed compilers/targets tolerating
   unaligned, type-punned reads -- it is not strictly conforming C. */
#define get16bits(d) (*((const uint16_t *) (d)))
#endif
#if !defined (get16bits)
/* Portable fallback: assemble the value from two byte loads (little-endian order). */
#define get16bits(d) ((((UA_UInt32)(((const uint8_t *)(d))[1])) << 8) + \
                      (UA_UInt32)(((const uint8_t *)(d))[0]) )
#endif
/* Paul Hsieh's "SuperFastHash": hash `len` bytes of `data` into 32 bits.
   The main loop consumes 4 bytes per iteration as two 16-bit chunks; the
   switch mops up the 0-3 remaining bytes. Returns 0 for empty/NULL input. */
static inline hash_t hash_string(const UA_Byte * data, UA_Int32 len) {
	hash_t hash = len;  /* seed with the length so equal prefixes of different lengths differ */
	hash_t tmp;
	int rem;
	if(len <= 0 || data == UA_NULL)
		return 0;
	rem = len & 3;  /* trailing bytes after the 4-byte groups */
	len >>= 2;      /* number of 4-byte groups */
	/* Main loop */
	for(; len > 0; len--) {
		hash += get16bits(data);
		tmp = (get16bits(data + 2) << 11) ^ hash;
		hash = (hash << 16) ^ tmp;
		data += 2 * sizeof(uint16_t); /* advance 4 bytes */
		hash += hash >> 11;
	}
	/* Handle end cases */
	switch (rem) {
	case 3:
		hash += get16bits(data);
		hash ^= hash << 16;
		hash ^= ((signed char)data[sizeof(uint16_t)]) << 18;
		hash += hash >> 11;
		break;
	case 2:
		hash += get16bits(data);
		hash ^= hash << 11;
		hash += hash >> 17;
		break;
	case 1:
		hash += (signed char)*data;
		hash ^= hash << 10;
		hash += hash >> 1;
		break;
	}
	/* Force "avalanching" of final 127 bits */
	hash ^= hash << 3;
	hash += hash >> 5;
	hash ^= hash << 4;
	hash += hash >> 17;
	hash ^= hash << 25;
	hash += hash >> 6;
	return hash;
}
  116. static inline hash_t hash(const UA_NodeId * n) {
  117. switch (n->encodingByte) {
  118. case UA_NODEIDTYPE_TWOBYTE:
  119. case UA_NODEIDTYPE_FOURBYTE:
  120. case UA_NODEIDTYPE_NUMERIC:
  121. return (n->identifier.numeric * 2654435761) % 2 ^ 32; // Knuth's multiplicative hashing
  122. case UA_NODEIDTYPE_STRING:
  123. return hash_string(n->identifier.string.data, n->identifier.string.length);
  124. case UA_NODEIDTYPE_GUID:
  125. return hash_string((UA_Byte *) & (n->identifier.guid), sizeof(UA_Guid));
  126. case UA_NODEIDTYPE_BYTESTRING:
  127. return hash_string((UA_Byte *) n->identifier.byteString.data, n->identifier.byteString.length);
  128. default:
  129. return 0;
  130. }
  131. }
  132. /* The following function returns an index into the above table of the nearest prime number which
  133. is greater than N, and near a power of two. */
  134. static inline unsigned int higher_prime_index(unsigned long n) {
  135. unsigned int low = 0;
  136. unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]);
  137. while(low != high) {
  138. unsigned int mid = low + (high - low) / 2;
  139. if(n > prime_tab[mid].prime)
  140. low = mid + 1;
  141. else
  142. high = mid;
  143. }
  144. return low;
  145. }
/* Compute x % y without a hardware divide, using the reciprocal `inv` and the
   `shift` precomputed in prime_tab (libiberty's technique). Falls back to the
   plain modulo operator when no 64-bit type is available for the high multiply. */
static inline hash_t mod_1(hash_t x, hash_t y, hash_t inv, int shift) {
	/* The multiplicative inverses computed above are for 32-bit types, and requires that we are
	   able to compute a highpart multiply. */
#ifdef UNSIGNED_64BIT_TYPE
	__extension__ typedef UNSIGNED_64BIT_TYPE ull;
	if(sizeof(hash_t) * CHAR_BIT <= 32) {
		hash_t t1, t2, t3, t4, q, r;
		t1 = ((ull) x * inv) >> 32; /* high half of x * inv */
		t2 = x - t1;
		t3 = t2 >> 1;
		t4 = t1 + t3;
		q = t4 >> shift;    /* q = x / y */
		r = x - (q * y);    /* remainder */
		return r;
	}
#endif
	return x % y;
}
  164. static inline hash_t mod(hash_t h, const Namespace * ns) {
  165. const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
  166. return mod_1(h, p->prime, p->inv, p->shift);
  167. }
  168. static inline hash_t mod_m2(hash_t h, const Namespace * ns) {
  169. const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
  170. return 1 + mod_1(h, p->prime - 2, p->inv_m2, p->shift);
  171. }
/* Delete the node held in an entry, dispatching on the node class to the
   matching destructor, and mark the slot empty. The status word is not reset.
   NOTE(review): a slot holding ENTRY_TOMBSTONE (0x01) would pass the NULL check
   and be dereferenced here; currently no code writes tombstones, so this is latent. */
static inline void clear_entry(Namespace * ns, Namespace_Entry * entry) {
	if(entry->node == UA_NULL)
		return;
	switch (entry->node->nodeClass) {
	case UA_NODECLASS_OBJECT:
		UA_ObjectNode_delete((UA_ObjectNode *) entry->node);
		break;
	case UA_NODECLASS_VARIABLE:
		UA_VariableNode_delete((UA_VariableNode *) entry->node);
		break;
	case UA_NODECLASS_METHOD:
		UA_MethodNode_delete((UA_MethodNode *) entry->node);
		break;
	case UA_NODECLASS_OBJECTTYPE:
		UA_ObjectTypeNode_delete((UA_ObjectTypeNode *) entry->node);
		break;
	case UA_NODECLASS_VARIABLETYPE:
		UA_VariableTypeNode_delete((UA_VariableTypeNode *) entry->node);
		break;
	case UA_NODECLASS_REFERENCETYPE:
		UA_ReferenceTypeNode_delete((UA_ReferenceTypeNode *) entry->node);
		break;
	case UA_NODECLASS_DATATYPE:
		UA_DataTypeNode_delete((UA_DataTypeNode *) entry->node);
		break;
	case UA_NODECLASS_VIEW:
		UA_ViewNode_delete((UA_ViewNode *) entry->node);
		break;
	default:
		/* Unknown node class: the pointer is dropped without calling a destructor. */
		break;
	}
	entry->node = UA_NULL;
}
/* Returns UA_SUCCESS if an entry was found. Otherwise, UA_ERROR is returned and the "entry"
   argument points to the first free entry under the NodeId.
   Open addressing with double hashing: first probe at h mod prime, then step by
   a second hash in [1, prime-1]. The loop can only terminate on an empty slot or
   a match, so the table must always contain at least one empty slot (expand()
   keeps occupancy below 75%).
   NOTE(review): ENTRY_TOMBSTONE slots are not handled; a tombstone value (0x01)
   in e->node would be dereferenced by UA_NodeId_compare. No code currently
   writes tombstones, so this is latent -- verify before introducing them. */
static inline UA_Int32 find_entry(const Namespace * ns, const UA_NodeId * nodeid, Namespace_Entry ** entry) {
	hash_t h = hash(nodeid);
	hash_t index = mod(h, ns);
	UA_UInt32 size = ns->size;
	Namespace_Entry *e = &ns->entries[index];
	if(e->node == UA_NULL) {
		*entry = e;
		return UA_ERROR;
	}
	if(UA_NodeId_compare(&e->node->nodeId, nodeid) == UA_EQUAL) {
		*entry = e;
		return UA_SUCCESS;
	}
	hash_t hash2 = mod_m2(h, ns);
	for(;;) {
		index += hash2;
		if(index >= size)
			index -= size;  /* cheaper than a modulo: one stride never exceeds size */
		e = &ns->entries[index];
		if(e->node == UA_NULL) {
			*entry = e;
			return UA_ERROR;
		}
		if(UA_NodeId_compare(&e->node->nodeId, nodeid) == UA_EQUAL) {
			*entry = e;
			return UA_SUCCESS;
		}
	}
	/* NOTREACHED -- kept to satisfy compilers that require a return here */
	return UA_SUCCESS;
}
  238. /* The following function changes size of memory allocated for the entries and repeatedly inserts
  239. the table elements. The occupancy of the table after the call will be about 50%. Naturally the
  240. hash table must already exist. Remember also that the place of the table entries is changed. If
  241. memory allocation failures are allowed, this function will return zero, indicating that the
  242. table could not be expanded. If all goes well, it will return a non-zero value. */
  243. static UA_Int32 expand(Namespace * ns) {
  244. Namespace_Entry *nentries;
  245. int32_t nsize;
  246. UA_UInt32 nindex;
  247. Namespace_Entry *oentries = ns->entries;
  248. int32_t osize = ns->size;
  249. Namespace_Entry *olimit = &oentries[osize];
  250. int32_t count = ns->count;
  251. /* Resize only when table after removal of unused elements is either too full or too empty. */
  252. if(count * 2 < osize && (count * 8 > osize || osize <= 32)) {
  253. return UA_SUCCESS;
  254. }
  255. nindex = higher_prime_index(count * 2);
  256. nsize = prime_tab[nindex].prime;
  257. if(UA_alloc((void **)&nentries, sizeof(Namespace_Entry) * nsize) != UA_SUCCESS)
  258. return UA_ERR_NO_MEMORY;
  259. ns->entries = nentries;
  260. ns->size = nsize;
  261. ns->sizePrimeIndex = nindex;
  262. Namespace_Entry *p = oentries;
  263. do {
  264. if(p->node != UA_NULL) {
  265. Namespace_Entry *e;
  266. find_entry(ns, &p->node->nodeId, &e); /* We know this returns an empty entry here */
  267. *e = *p;
  268. }
  269. p++;
  270. } while(p < olimit);
  271. UA_free(oentries);
  272. return UA_SUCCESS;
  273. }
  274. /**********************/
  275. /* Exported functions */
  276. /**********************/
  277. UA_Int32 Namespace_new(Namespace ** result, UA_UInt32 size, UA_UInt32 namespaceId) {
  278. Namespace *ns;
  279. if(UA_alloc((void **)&ns, sizeof(Namespace)) != UA_SUCCESS)
  280. return UA_ERR_NO_MEMORY;
  281. UA_UInt32 sizePrimeIndex = higher_prime_index(size);
  282. size = prime_tab[sizePrimeIndex].prime;
  283. if(UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * size) != UA_SUCCESS) {
  284. UA_free(ns);
  285. return UA_ERR_NO_MEMORY;
  286. }
  287. /* set entries to zero */
  288. memset(ns->entries, 0, size * sizeof(Namespace_Entry));
  289. *ns = (Namespace) {
  290. namespaceId, ns->entries, size, 0, sizePrimeIndex};
  291. *result = ns;
  292. return UA_SUCCESS;
  293. }
  294. static void Namespace_clear(Namespace * ns) {
  295. UA_UInt32 size = ns->size;
  296. Namespace_Entry *entries = ns->entries;
  297. for(UA_UInt32 i = 0; i < size; i++)
  298. clear_entry(ns, &entries[i]);
  299. ns->count = 0;
  300. }
  301. void Namespace_empty(Namespace * ns) {
  302. Namespace_clear(ns);
  303. /* Downsize the table. */
  304. if(ns->size > 1024 * 1024 / sizeof(Namespace_Entry)) {
  305. int nindex = higher_prime_index(1024 / sizeof(Namespace_Entry));
  306. int nsize = prime_tab[nindex].prime;
  307. UA_free(ns->entries);
  308. UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * nsize); // FIXME: Check return
  309. // value
  310. ns->size = nsize;
  311. ns->sizePrimeIndex = nindex;
  312. }
  313. }
  314. void Namespace_delete(Namespace * ns) {
  315. Namespace_clear(ns);
  316. UA_free(ns->entries);
  317. UA_free(ns);
  318. }
  319. UA_Int32 Namespace_insert(Namespace * ns, const UA_Node * node) {
  320. if(ns->size * 3 <= ns->count * 4) {
  321. if(expand(ns) != UA_SUCCESS)
  322. return UA_ERROR;
  323. }
  324. Namespace_Entry *entry;
  325. UA_Int32 found = find_entry(ns, &node->nodeId, &entry);
  326. if(found == UA_SUCCESS)
  327. return UA_ERROR; /* There is already an entry for that nodeid */
  328. entry->node = node;
  329. ns->count++;
  330. return UA_SUCCESS;
  331. }
/* Insert a node under a freshly chosen numeric NodeId in this namespace; the
   node's nodeId field is overwritten with the generated id. Returns UA_SUCCESS
   on insertion (UA_ERROR only when the table cannot grow).
   NOTE(review): candidate ids live in [0, table size) and advance by the
   secondary hash of the initial candidate; since the table size is prime the
   stride visits every residue, and count < size guarantees a free id exists.
   A generated id may still collide with an id later chosen by a caller of
   Namespace_insert -- confirm this is acceptable. */
UA_Int32 Namespace_insertUnique(Namespace * ns, UA_Node * node) {
	if(ns->size * 3 <= ns->count * 4) {
		if(expand(ns) != UA_SUCCESS)
			return UA_ERROR;
	}
	// find unoccupied numeric nodeid
	node->nodeId.namespace = ns->namespaceId;
	node->nodeId.encodingByte = UA_NODEIDTYPE_NUMERIC;
	node->nodeId.identifier.numeric = ns->count; /* first candidate id */
	hash_t h = hash(&node->nodeId);
	hash_t hash2 = mod_m2(h, ns);
	UA_UInt32 size = ns->size;
	// advance integer (hash) until a free entry is found
	Namespace_Entry *entry = UA_NULL;
	while(1) {
		if(find_entry(ns, &node->nodeId, &entry) != UA_SUCCESS)
			break; /* id unused: entry now points to the free slot */
		node->nodeId.identifier.numeric += hash2;
		if(node->nodeId.identifier.numeric >= size)
			node->nodeId.identifier.numeric -= size; /* wrap into [0, size) */
	}
	entry->node = node;
	ns->count++;
	return UA_SUCCESS;
}
  357. UA_Int32 Namespace_contains(const Namespace * ns, const UA_NodeId * nodeid) {
  358. Namespace_Entry *entry;
  359. return find_entry(ns, nodeid, &entry);
  360. }
  361. UA_Int32 Namespace_get(Namespace const *ns, const UA_NodeId * nodeid, UA_Node const **result,
  362. Namespace_Entry_Lock ** lock) {
  363. Namespace_Entry *entry;
  364. if(find_entry(ns, nodeid, &entry) != UA_SUCCESS)
  365. return UA_ERROR;
  366. *result = entry->node;
  367. return UA_SUCCESS;
  368. }
  369. UA_Int32 Namespace_remove(Namespace * ns, const UA_NodeId * nodeid) {
  370. Namespace_Entry *entry;
  371. if(find_entry(ns, nodeid, &entry) != UA_SUCCESS)
  372. return UA_ERROR;
  373. // TODO: Check if deleting the node makes the Namespace inconsistent.
  374. clear_entry(ns, entry);
  375. /* Downsize the hashmap if it is very empty */
  376. if(ns->count * 8 < ns->size && ns->size > 32)
  377. expand(ns);
  378. return UA_SUCCESS;
  379. }
  380. UA_Int32 Namespace_iterate(const Namespace * ns, Namespace_nodeVisitor visitor) {
  381. UA_UInt32 i;
  382. for(i = 0; i < ns->size; i++) {
  383. Namespace_Entry *entry = &ns->entries[i];
  384. if(entry != UA_NULL && visitor != UA_NULL)
  385. visitor(entry->node);
  386. }
  387. return UA_SUCCESS;
  388. }
  389. void Namespace_Entry_Lock_release(Namespace_Entry_Lock * lock) {
  390. ;
  391. }