/* ua_namespace.c — hash-map storage of UA_Node objects, addressed by UA_NodeId.
   Open addressing with double hashing; prime table and division tricks adapted
   from libiberty's hashtab (LGPL). */
  1. #include "ua_namespace.h"
  2. #include <string.h>
  3. #include <stdio.h>
  4. /*****************************************/
  5. /* Internal (not exported) functionality */
  6. /*****************************************/
/* Handle for a locked namespace entry. Currently only carries a pointer to the
   entry; no actual locking is implemented in this file (see
   Namespace_Entry_Lock_release, which is a no-op). */
struct Namespace_Entry_Lock {
    Namespace_Entry *entry;
};
/* The tombstone (entry.node == 0x01) indicates that an entry was deleted at
   the position in the hash-map. This information is used to decide whether the
   entire table shall be rehashed so that entries are found faster.
   NOTE(review): ENTRY_TOMBSTONE is defined but never stored by the code in
   this file — deletion currently just sets the slot to empty. */
#define ENTRY_EMPTY UA_NULL
#define ENTRY_TOMBSTONE 0x01

/* The central data structure is a hash-map of UA_Node objects. Entry lookup
   via Algorithm D from Knuth's TAOCP (no linked lists here). Table of primes
   and mod-functions are from libiberty (licensed under LGPL). */
typedef UA_UInt32 hash_t;

/* One prime table size together with the precomputed magic constants that let
   mod_1() reduce a hash modulo this prime without a hardware division. */
struct prime_ent {
    hash_t prime;
    hash_t inv;     /* multiplicative inverse used to compute h % prime */
    hash_t inv_m2;  /* inverse of prime-2 (for the secondary hash step) */
    hash_t shift;   /* final shift of the high-part multiply */
};
/* Table of primes near powers of two, each with the magic numbers needed by
   mod_1(). higher_prime_index() binary-searches this table; do not reorder. */
static struct prime_ent const prime_tab[] = {
    {7, 0x24924925, 0x9999999b, 2},
    {13, 0x3b13b13c, 0x745d1747, 3},
    {31, 0x08421085, 0x1a7b9612, 4},
    {61, 0x0c9714fc, 0x15b1e5f8, 5},
    {127, 0x02040811, 0x0624dd30, 6},
    {251, 0x05197f7e, 0x073260a5, 7},
    {509, 0x01824366, 0x02864fc8, 8},
    {1021, 0x00c0906d, 0x014191f7, 9},
    {2039, 0x0121456f, 0x0161e69e, 10},
    {4093, 0x00300902, 0x00501908, 11},
    {8191, 0x00080041, 0x00180241, 12},
    {16381, 0x000c0091, 0x00140191, 13},
    {32749, 0x002605a5, 0x002a06e6, 14},
    {65521, 0x000f00e2, 0x00110122, 15},
    {131071, 0x00008001, 0x00018003, 16},
    {262139, 0x00014002, 0x0001c004, 17},
    {524287, 0x00002001, 0x00006001, 18},
    {1048573, 0x00003001, 0x00005001, 19},
    {2097143, 0x00004801, 0x00005801, 20},
    {4194301, 0x00000c01, 0x00001401, 21},
    {8388593, 0x00001e01, 0x00002201, 22},
    {16777213, 0x00000301, 0x00000501, 23},
    {33554393, 0x00001381, 0x00001481, 24},
    {67108859, 0x00000141, 0x000001c1, 25},
    {134217689, 0x000004e1, 0x00000521, 26},
    {268435399, 0x00000391, 0x000003b1, 27},
    {536870909, 0x00000019, 0x00000029, 28},
    {1073741789, 0x0000008d, 0x00000095, 29},
    {2147483647, 0x00000003, 0x00000007, 30},
    /* Avoid "decimal constant so large it is unsigned" for 4294967291. */
    {0xfffffffb, 0x00000006, 0x00000008, 31}
};
/* Hashing inspired by code from http://www.azillionmonkeys.com/qed/hash.html,
   licensed under the LGPL 2.1 (Paul Hsieh's SuperFastHash). */
#undef get16bits
/* On platforms known to tolerate unaligned 16-bit reads, load directly. */
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) || \
    defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif
/* Portable fallback: assemble the 16-bit value byte by byte (little-endian). */
#if !defined (get16bits)
#define get16bits(d) ((((UA_UInt32)(((const uint8_t *)(d))[1])) << 8) + \
                      (UA_UInt32)(((const uint8_t *)(d))[0]) )
#endif
/* Paul Hsieh's SuperFastHash over a byte buffer. Consumes the input in 4-byte
   chunks and mixes the 0-3 trailing bytes separately. Returns 0 for NULL data
   or non-positive length. */
static inline hash_t hash_string(const UA_Byte * data, UA_Int32 len) {
    hash_t hash = len;  /* seed with the length so equal prefixes differ */
    hash_t tmp;
    int rem;
    if(len <= 0 || data == UA_NULL)
        return 0;
    rem = len & 3;  /* trailing bytes after the 4-byte chunks */
    len >>= 2;      /* number of full 4-byte chunks */
    /* Main loop */
    for(; len > 0; len--) {
        hash += get16bits(data);
        tmp = (get16bits(data + 2) << 11) ^ hash;
        hash = (hash << 16) ^ tmp;
        data += 2 * sizeof(uint16_t);
        hash += hash >> 11;
    }
    /* Handle end cases */
    switch (rem) {
    case 3:
        hash += get16bits(data);
        hash ^= hash << 16;
        hash ^= ((signed char)data[sizeof(uint16_t)]) << 18;
        hash += hash >> 11;
        break;
    case 2:
        hash += get16bits(data);
        hash ^= hash << 11;
        hash += hash >> 17;
        break;
    case 1:
        hash += (signed char)*data;
        hash ^= hash << 10;
        hash += hash >> 1;
        break;
    }
    /* Force "avalanching" of final 127 bits (wording from the upstream
       SuperFastHash source) */
    hash ^= hash << 3;
    hash += hash >> 5;
    hash ^= hash << 4;
    hash += hash >> 17;
    hash ^= hash << 25;
    hash += hash >> 6;
    return hash;
}
  112. static inline hash_t hash(const UA_NodeId * n) {
  113. switch (n->encodingByte) {
  114. case UA_NODEIDTYPE_TWOBYTE:
  115. case UA_NODEIDTYPE_FOURBYTE:
  116. case UA_NODEIDTYPE_NUMERIC:
  117. return (n->identifier.numeric * 2654435761) % 2 ^ 32; // Knuth's multiplicative hashing
  118. case UA_NODEIDTYPE_STRING:
  119. return hash_string(n->identifier.string.data, n->identifier.string.length);
  120. case UA_NODEIDTYPE_GUID:
  121. return hash_string((UA_Byte *) & (n->identifier.guid), sizeof(UA_Guid));
  122. case UA_NODEIDTYPE_BYTESTRING:
  123. return hash_string((UA_Byte *) n->identifier.byteString.data, n->identifier.byteString.length);
  124. default:
  125. return 0;
  126. }
  127. }
  128. /* The following function returns an index into the above table of the nearest prime number which
  129. is greater than N, and near a power of two. */
  130. static inline unsigned int higher_prime_index(unsigned long n) {
  131. unsigned int low = 0;
  132. unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]);
  133. while(low != high) {
  134. unsigned int mid = low + (high - low) / 2;
  135. if(n > prime_tab[mid].prime)
  136. low = mid + 1;
  137. else
  138. high = mid;
  139. }
  140. return low;
  141. }
/* Compute x % y without a hardware division, using the magic constants
   precomputed in prime_tab (inv and shift must belong to the divisor y).
   Falls back to the % operator when no 64-bit type is available. */
static inline hash_t mod_1(hash_t x, hash_t y, hash_t inv, int shift) {
    /* The multiplicative inverses computed above are for 32-bit types, and
       requires that we are able to compute a highpart multiply. */
#ifdef UNSIGNED_64BIT_TYPE
    __extension__ typedef UNSIGNED_64BIT_TYPE ull;
    if(sizeof(hash_t) * CHAR_BIT <= 32) {
        hash_t t1, t2, t3, t4, q, r;
        t1 = ((ull) x * inv) >> 32;  /* high 32 bits of x * inv */
        t2 = x - t1;
        t3 = t2 >> 1;
        t4 = t1 + t3;
        q = t4 >> shift;             /* quotient x / y */
        r = x - (q * y);             /* remainder x % y */
        return r;
    }
#endif
    return x % y;
}
/* Primary hash: reduce h modulo the current table size (a prime). */
static inline hash_t mod(hash_t h, const Namespace * ns) {
    const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
    return mod_1(h, p->prime, p->inv, p->shift);
}
/* Secondary hash for double hashing: a step size in [1, prime-2]. It is
   nonzero and, since the table size is prime, coprime to it — so repeated
   stepping visits every slot. */
static inline hash_t mod_m2(hash_t h, const Namespace * ns) {
    const struct prime_ent *p = &prime_tab[ns->sizePrimeIndex];
    return 1 + mod_1(h, p->prime - 2, p->inv_m2, p->shift);
}
/* Free the node stored in an entry (dispatching on its concrete node class),
   mark the slot empty and decrement the namespace's node count. Does nothing
   for an already-empty slot.
   NOTE(review): an ENTRY_TOMBSTONE value (0x01) in entry->node would be
   dereferenced here — safe only because tombstones are never stored. */
static inline void clear_entry(Namespace * ns, Namespace_Entry * entry) {
    if(entry->node == UA_NULL)
        return;
    switch (entry->node->nodeClass) {
    case UA_NODECLASS_OBJECT:
        UA_ObjectNode_delete((UA_ObjectNode *) entry->node);
        break;
    case UA_NODECLASS_VARIABLE:
        UA_VariableNode_delete((UA_VariableNode *) entry->node);
        break;
    case UA_NODECLASS_METHOD:
        UA_MethodNode_delete((UA_MethodNode *) entry->node);
        break;
    case UA_NODECLASS_OBJECTTYPE:
        UA_ObjectTypeNode_delete((UA_ObjectTypeNode *) entry->node);
        break;
    case UA_NODECLASS_VARIABLETYPE:
        UA_VariableTypeNode_delete((UA_VariableTypeNode *) entry->node);
        break;
    case UA_NODECLASS_REFERENCETYPE:
        UA_ReferenceTypeNode_delete((UA_ReferenceTypeNode *) entry->node);
        break;
    case UA_NODECLASS_DATATYPE:
        UA_DataTypeNode_delete((UA_DataTypeNode *) entry->node);
        break;
    case UA_NODECLASS_VIEW:
        UA_ViewNode_delete((UA_ViewNode *) entry->node);
        break;
    default:
        /* unknown node class: leak the node rather than call a wrong dtor */
        break;
    }
    entry->node = UA_NULL;
    ns->count--;
}
  202. /* Returns UA_SUCCESS if an entry was found. Otherwise, UA_ERROR is returned and the "entry"
  203. argument points to the first free entry under the NodeId. */
  204. static inline UA_Int32 find_entry(const Namespace * ns, const UA_NodeId * nodeid, Namespace_Entry ** entry) {
  205. hash_t h = hash(nodeid);
  206. hash_t index = mod(h, ns);
  207. UA_UInt32 size = ns->size;
  208. Namespace_Entry *e = &ns->entries[index];
  209. if(e->node == UA_NULL) {
  210. *entry = e;
  211. return UA_ERROR;
  212. }
  213. if(UA_NodeId_equal(&e->node->nodeId, nodeid) == UA_EQUAL) {
  214. *entry = e;
  215. return UA_SUCCESS;
  216. }
  217. hash_t hash2 = mod_m2(h, ns);
  218. for(;;) {
  219. index += hash2;
  220. if(index >= size)
  221. index -= size;
  222. e = &ns->entries[index];
  223. if(e->node == UA_NULL) {
  224. *entry = e;
  225. return UA_ERROR;
  226. }
  227. if(UA_NodeId_equal(&e->node->nodeId, nodeid) == UA_EQUAL) {
  228. *entry = e;
  229. return UA_SUCCESS;
  230. }
  231. }
  232. /* NOTREACHED */
  233. return UA_SUCCESS;
  234. }
/* The following function changes size of memory allocated for the entries and
   repeatedly inserts the table elements. The occupancy of the table after the
   call will be about 50%. Naturally the hash table must already exist.
   Remember also that the place of the table entries is changed. If memory
   allocation fails, UA_ERR_NO_MEMORY is returned and the old table is kept
   unchanged; otherwise UA_SUCCESS. */
static UA_Int32 expand(Namespace * ns) {
    Namespace_Entry *nentries;
    int32_t nsize;
    UA_UInt32 nindex;
    Namespace_Entry *oentries = ns->entries;
    int32_t osize = ns->size;
    Namespace_Entry *olimit = &oentries[osize];
    int32_t count = ns->count;
    /* Resize only when table after removal of unused elements is either too
       full (>= 50%) or too empty (< 12.5%, and not already minimal). */
    if(count * 2 < osize && (count * 8 > osize || osize <= 32)) {
        return UA_SUCCESS;
    }
    nindex = higher_prime_index(count * 2);  /* target ~50% occupancy */
    nsize = prime_tab[nindex].prime;
    if(UA_alloc((void **)&nentries, sizeof(Namespace_Entry) * nsize) != UA_SUCCESS)
        return UA_ERR_NO_MEMORY;
    memset(nentries, 0, nsize * sizeof(Namespace_Entry));
    /* Install the new table first so find_entry probes into it below. */
    ns->entries = nentries;
    ns->size = nsize;
    ns->sizePrimeIndex = nindex;
    Namespace_Entry *p = oentries;
    do {
        if(p->node != UA_NULL) {
            Namespace_Entry *e;
            find_entry(ns, &p->node->nodeId, &e); /* We know this returns an empty entry here */
            *e = *p;
        }
        p++;
    } while(p < olimit);
    UA_free(oentries);
    return UA_SUCCESS;
}
  272. /**********************/
  273. /* Exported functions */
  274. /**********************/
  275. UA_Int32 Namespace_new(Namespace ** result, UA_UInt32 size, UA_UInt32 namespaceId) {
  276. Namespace *ns;
  277. if(UA_alloc((void **)&ns, sizeof(Namespace)) != UA_SUCCESS)
  278. return UA_ERR_NO_MEMORY;
  279. UA_UInt32 sizePrimeIndex = higher_prime_index(size);
  280. size = prime_tab[sizePrimeIndex].prime;
  281. if(UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * size) != UA_SUCCESS) {
  282. UA_free(ns);
  283. return UA_ERR_NO_MEMORY;
  284. }
  285. /* set entries to zero */
  286. memset(ns->entries, 0, size * sizeof(Namespace_Entry));
  287. *ns = (Namespace) {namespaceId, ns->entries, size, 0, sizePrimeIndex};
  288. *result = ns;
  289. return UA_SUCCESS;
  290. }
  291. static void Namespace_clear(Namespace * ns) {
  292. UA_UInt32 size = ns->size;
  293. Namespace_Entry *entries = ns->entries;
  294. for(UA_UInt32 i = 0; i < size; i++)
  295. clear_entry(ns, &entries[i]);
  296. ns->count = 0;
  297. }
  298. void Namespace_empty(Namespace * ns) {
  299. Namespace_clear(ns);
  300. /* Downsize the table. */
  301. if(ns->size > 1024 * 1024 / sizeof(Namespace_Entry)) {
  302. int nindex = higher_prime_index(1024 / sizeof(Namespace_Entry));
  303. int nsize = prime_tab[nindex].prime;
  304. UA_free(ns->entries);
  305. UA_alloc((void **)&ns->entries, sizeof(Namespace_Entry) * nsize); // FIXME: Check return
  306. // value
  307. ns->size = nsize;
  308. ns->sizePrimeIndex = nindex;
  309. }
  310. }
/* Destroy a namespace: free all nodes, then the entry table, then the
   namespace structure itself. */
void Namespace_delete(Namespace * ns) {
    Namespace_clear(ns);
    UA_free(ns->entries);
    UA_free(ns);
}
  316. UA_Int32 Namespace_insert(Namespace * ns, const UA_Node * node) {
  317. if(ns->size * 3 <= ns->count * 4) {
  318. if(expand(ns) != UA_SUCCESS)
  319. return UA_ERROR;
  320. }
  321. Namespace_Entry *entry;
  322. UA_Int32 found = find_entry(ns, &node->nodeId, &entry);
  323. if(found == UA_SUCCESS)
  324. return UA_ERROR; /* There is already an entry for that nodeid */
  325. entry->node = node;
  326. ns->count++;
  327. return UA_SUCCESS;
  328. }
/* Insert a node under a fresh, unoccupied numeric NodeId chosen by the
   namespace. The node's nodeId fields are overwritten.
   NOTE(review): termination relies on the table having free slots (ensured by
   the expansion above) and on the candidate ids eventually hashing to one of
   them; there is no explicit iteration bound — confirm this cannot spin on
   adversarial occupancy. */
UA_Int32 Namespace_insertUnique(Namespace * ns, UA_Node * node) {
    /* Keep occupancy below 75%, as in Namespace_insert. */
    if(ns->size * 3 <= ns->count * 4) {
        if(expand(ns) != UA_SUCCESS)
            return UA_ERROR;
    }
    // find unoccupied numeric nodeid
    node->nodeId.namespace = ns->namespaceId;
    node->nodeId.encodingByte = UA_NODEIDTYPE_NUMERIC;
    node->nodeId.identifier.numeric = ns->count;  /* starting candidate id */
    hash_t h = hash(&node->nodeId);
    hash_t hash2 = mod_m2(h, ns);  /* step between candidate ids */
    UA_UInt32 size = ns->size;
    // advance integer (hash) until a free entry is found
    Namespace_Entry *entry = UA_NULL;
    while(1) {
        /* find_entry != UA_SUCCESS means the id is unused and entry points
           at the free slot where the node belongs. */
        if(find_entry(ns, &node->nodeId, &entry) != UA_SUCCESS)
            break;
        node->nodeId.identifier.numeric += hash2;
        if(node->nodeId.identifier.numeric >= size)
            node->nodeId.identifier.numeric -= size;
    }
    entry->node = node;
    ns->count++;
    return UA_SUCCESS;
}
  354. UA_Int32 Namespace_contains(const Namespace * ns, const UA_NodeId * nodeid) {
  355. Namespace_Entry *entry;
  356. return (find_entry(ns, nodeid, &entry) == UA_SUCCESS ? UA_TRUE : UA_FALSE);
  357. }
/* Look up a node by id. On success sets *result and returns UA_SUCCESS;
   otherwise returns UA_ERROR.
   NOTE(review): the lock out-parameter is accepted but never written — the
   lock mechanism is not implemented; callers must not rely on *lock being
   initialized here. */
UA_Int32 Namespace_get(Namespace const *ns, const UA_NodeId * nodeid, const UA_Node **result,
                       Namespace_Entry_Lock ** lock) {
    Namespace_Entry *entry;
    if(find_entry(ns, nodeid, &entry) != UA_SUCCESS)
        return UA_ERROR;
    *result = entry->node;
    return UA_SUCCESS;
}
  366. UA_Int32 Namespace_remove(Namespace * ns, const UA_NodeId * nodeid) {
  367. Namespace_Entry *entry;
  368. if(find_entry(ns, nodeid, &entry) != UA_SUCCESS)
  369. return UA_ERROR;
  370. // TODO: Check if deleting the node makes the Namespace inconsistent.
  371. clear_entry(ns, entry);
  372. /* Downsize the hashmap if it is very empty */
  373. if(ns->count * 8 < ns->size && ns->size > 32)
  374. expand(ns);
  375. return UA_SUCCESS;
  376. }
  377. UA_Int32 Namespace_iterate(const Namespace * ns, Namespace_nodeVisitor visitor) {
  378. if(visitor == UA_NULL) return UA_SUCCESS;
  379. for(UA_UInt32 i = 0; i < ns->size; i++) {
  380. Namespace_Entry *entry = &ns->entries[i];
  381. if(entry->node != UA_NULL)
  382. visitor(entry->node);
  383. }
  384. return UA_SUCCESS;
  385. }
  386. void Namespace_Entry_Lock_release(Namespace_Entry_Lock * lock) {
  387. ;
  388. }