Merge branch 'master' into typedescription

Julius Pfrommer 10 years ago
parent
commit
d958465d82

+ 7 - 2
.travis.yml

@@ -17,6 +17,7 @@ addons:
     build_command: make
     branch_pattern: coverity_scan
 before_install:
+   - sudo apt-get install binutils-mingw-w64-i686 gcc-mingw-w64-i686
    - sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y
    - sudo add-apt-repository ppa:kalakris/cmake -y
    - sudo apt-get update -qq
@@ -33,12 +34,16 @@ script:
    - echo "Testing builds"
    - mkdir -p build
    - cd build
+   - echo "Cross compile build for MinGW"
+   - cmake -DCMAKE_TOOLCHAIN_FILE=../Toolchain-mingw32.cmake -DEXAMPLESERVER=ON ..
+   - make
+   - cd .. && rm build -rf && mkdir -p build && cd build
    - echo "Only needed profile"
    - cmake -DTYPES_ONLY_NEEDED=ON ..
    - make
    - cd .. && rm build -rf && mkdir -p build && cd build
-   - cmake -DGENERATE_DOCUMENTATION=ON .. 
    - echo "Production build"
+   - cmake -DGENERATE_DOCUMENTATION=ON .. 
    - make
    - echo "Generate documentation"
    - make doc
@@ -51,4 +56,4 @@ script:
 
 after_success:
    - ./tools/.deployDoxygen.sh
-   - ./tools/.coverity.sh
+   - ./tools/.coverity.sh

+ 1 - 1
CMakeLists.txt

@@ -21,7 +21,6 @@ file(GLOB_RECURSE exported_headers "${CMAKE_CURRENT_SOURCE_DIR}/include/*.h")
 file(GLOB_RECURSE headers "${CMAKE_CURRENT_SOURCE_DIR}/src/*.h")
 file(GLOB generated_headers "${PROJECT_BINARY_DIR}/src_generated/*.h")
 set(lib_sources src/ua_types.c
-                src/ua_util.c
                 src/ua_types_encoding_binary.c
                 ${PROJECT_BINARY_DIR}/src_generated/ua_types_generated.c
                 ${PROJECT_BINARY_DIR}/src_generated/ua_namespace_0.c
@@ -29,6 +28,7 @@ set(lib_sources src/ua_types.c
                 ${PROJECT_BINARY_DIR}/src_generated/ua_transport_generated.c
                 src/ua_securechannel.c
                 src/ua_session.c
+                src/client/ua_client.c
                 src/server/ua_server.c
 				src/server/ua_server_addressspace.c
 				src/server/ua_server_binary.c

+ 52 - 0
Toolchain-mingw32.cmake

@@ -0,0 +1,52 @@
+# the name of the target operating system
+set(CMAKE_SYSTEM_NAME Windows)
+
+#remove the runtime dependency for libgcc_s_sjlj-1.dll
+set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-libgcc")
+
+# Which compilers to use for C and C++, and location of target
+# environment.
+if(EXISTS /usr/i686-w64-mingw32)
+# First look in standard location as used by Debian/Ubuntu/etc.
+set(CMAKE_C_COMPILER i686-w64-mingw32-gcc)
+set(CMAKE_CXX_COMPILER i686-w64-mingw32-g++)
+set(CMAKE_RC_COMPILER i686-w64-mingw32-windres)
+set(CMAKE_AR:FILEPATH /usr/bin/i686-w64-mingw32-ar)
+set(CMAKE_FIND_ROOT_PATH /usr/i686-w64-mingw32)
+elseif(EXISTS /usr/i586-mingw32msvc)
+# Then check the older MinGW location shipped by Debian/Ubuntu/etc.
+set(CMAKE_C_COMPILER i586-mingw32msvc-gcc)
+set(CMAKE_CXX_COMPILER i586-mingw32msvc-g++)
+set(CMAKE_RC_COMPILER i586-mingw32msvc-windres)
+set(CMAKE_FIND_ROOT_PATH /usr/i586-mingw32msvc)
+elseif(EXISTS /opt/mingw)
+# You can get a MinGW environment using the script at <http://mxe.cc>.
+# It downloads and builds MinGW and most of the dependencies for you.
+# You can use the toolchain file generated by MXE called `mxe-conf.cmake'
+# or you can use this file by adjusting the above and following paths.
+set(CMAKE_C_COMPILER /opt/mingw/usr/bin/i686-pc-mingw32-gcc)
+set(CMAKE_CXX_COMPILER /opt/mingw/usr/bin/i686-pc-mingw32-g++)
+set(CMAKE_RC_COMPILER /opt/mingw/usr/bin/i686-pc-mingw32-windres)
+set(CMAKE_FIND_ROOT_PATH /opt/mingw/usr/i686-pc-mingw32)
+else()
+# Else fill in local path which the user will likely adjust.
+# This is the location assumed by <http://www.libsdl.org/extras/win32/cross/>
+set(CMAKE_C_COMPILER /usr/local/cross-tools/bin/i386-mingw32-gcc)
+set(CMAKE_CXX_COMPILER /usr/local/cross-tools/bin/i386-mingw32-g++)
+set(CMAKE_RC_COMPILER /usr/local/cross-tools/bin/i386-mingw32-windres)
+set(CMAKE_FIND_ROOT_PATH /usr/local/cross-tools)
+endif() 
+
+# Adjust the default behaviour of the FIND_XXX() commands:
+# search headers and libraries in the target environment, search
+# programs in the host environment
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+# Tell pkg-config not to look at the target environment's .pc files.
+# Setting PKG_CONFIG_LIBDIR sets the default search directory, but we have to
+# set PKG_CONFIG_PATH as well to prevent pkg-config falling back to the host's
+# path.
+set(ENV{PKG_CONFIG_LIBDIR} ${CMAKE_FIND_ROOT_PATH}/lib/pkgconfig)
+set(ENV{PKG_CONFIG_PATH} ${CMAKE_FIND_ROOT_PATH}/lib/pkgconfig)
+set(ENV{MINGDIR} ${CMAKE_FIND_ROOT_PATH}) 

+ 166 - 30
examples/networklayer_tcp.c

@@ -8,7 +8,7 @@
 #include <malloc.h>
 #include <winsock2.h>
 #include <sys/types.h>
-#include <Windows.h>
+#include <windows.h>
 #include <ws2tcpip.h>
 #define CLOSESOCKET(S) closesocket(S)
 #else
@@ -31,39 +31,54 @@
 #include <urcu/uatomic.h>
 #endif
 
-#define MAXBACKLOG 100
-
-struct Networklayer_TCP;
-
-/* Forwarded to the server as a (UA_Connection) and used for callbacks back into
-   the networklayer */
+/* Forwarded as a (UA_Connection) and used for callbacks back into the
+   networklayer */
 typedef struct {
 	UA_Connection connection;
 	UA_Int32 sockfd;
-	struct NetworkLayerTCP *layer;
+	void *layer;
 } TCPConnection;
 
+/***************************/
+/* Server NetworkLayer TCP */
+/***************************/
+
+#define MAXBACKLOG 100
+
 /* Internal mapping of sockets to connections */
 typedef struct {
     TCPConnection *connection;
-    UA_Int32 sockfd;
+#ifdef _WIN32
+	UA_UInt32 sockfd;
+#else
+	UA_Int32 sockfd;
+#endif
 } ConnectionLink;
 
-typedef struct NetworkLayerTCP {
+typedef struct {
 	UA_ConnectionConfig conf;
 	fd_set fdset;
+#ifdef _WIN32
+	UA_UInt32 serversockfd;
+	UA_UInt32 highestfd;
+#else
 	UA_Int32 serversockfd;
 	UA_Int32 highestfd;
+#endif
     UA_UInt16 conLinksSize;
     ConnectionLink *conLinks;
     UA_UInt32 port;
     /* We remove the connection links only in the main thread. Attach
        to-be-deleted links with atomic operations */
     struct deleteLink {
-        UA_Int32 sockfd;
+#ifdef _WIN32
+		UA_UInt32 sockfd;
+#else
+		UA_Int32 sockfd;
+#endif
         struct deleteLink *next;
     } *deleteLinkList;
-} NetworkLayerTCP;
+} ServerNetworkLayerTCP;
 
 static UA_StatusCode setNonBlocking(int sockid) {
 #ifdef _WIN32
@@ -83,7 +98,7 @@ static void freeConnectionCallback(UA_Server *server, TCPConnection *connection)
 }
 
 // after every select, reset the set of sockets we want to listen on
-static void setFDSet(NetworkLayerTCP *layer) {
+static void setFDSet(ServerNetworkLayerTCP *layer) {
 	FD_ZERO(&layer->fdset);
 	FD_SET(layer->serversockfd, &layer->fdset);
 	layer->highestfd = layer->serversockfd;
@@ -98,7 +113,7 @@ static void setFDSet(NetworkLayerTCP *layer) {
 void closeConnection(TCPConnection *handle);
 void writeCallback(TCPConnection *handle, UA_ByteStringArray gather_buf);
 
-static UA_StatusCode NetworkLayerTCP_add(NetworkLayerTCP *layer, UA_Int32 newsockfd) {
+static UA_StatusCode ServerNetworkLayerTCP_add(ServerNetworkLayerTCP *layer, UA_Int32 newsockfd) {
     setNonBlocking(newsockfd);
     TCPConnection *c = malloc(sizeof(TCPConnection));
 	if(!c)
@@ -123,7 +138,7 @@ static UA_StatusCode NetworkLayerTCP_add(NetworkLayerTCP *layer, UA_Int32 newsoc
 }
 
 // Takes the linked list of closed connections and returns the work for the server loop
-static UA_UInt32 batchDeleteLinks(NetworkLayerTCP *layer, UA_WorkItem **returnWork) {
+static UA_UInt32 batchDeleteLinks(ServerNetworkLayerTCP *layer, UA_WorkItem **returnWork) {
     UA_WorkItem *work = malloc(sizeof(UA_WorkItem)*layer->conLinksSize);
 	if (!work) {
 		*returnWork = NULL;
@@ -194,8 +209,9 @@ void closeConnection(TCPConnection *handle) {
 
     // Remove the link later in the main thread
     d->sockfd = handle->sockfd;
-    d->next = handle->layer->deleteLinkList;
-    handle->layer->deleteLinkList = d;
+    ServerNetworkLayerTCP *layer = (ServerNetworkLayerTCP*)handle->layer;
+    d->next = layer->deleteLinkList;
+    layer->deleteLinkList = d;
 }
 #endif
 
@@ -206,7 +222,7 @@ void writeCallback(TCPConnection *handle, UA_ByteStringArray gather_buf) {
 	LPWSABUF buf = _alloca(gather_buf.stringsSize * sizeof(WSABUF));
 	int result = 0;
 	for(UA_UInt32 i = 0; i<gather_buf.stringsSize; i++) {
-		buf[i].buf = gather_buf.strings[i].data;
+		buf[i].buf = (char*)gather_buf.strings[i].data;
 		buf[i].len = gather_buf.strings[i].length;
 		total_len += gather_buf.strings[i].length;
 	}
@@ -240,7 +256,7 @@ void writeCallback(TCPConnection *handle, UA_ByteStringArray gather_buf) {
 #endif
 }
 
-static UA_StatusCode NetworkLayerTCP_start(NetworkLayerTCP *layer) {
+static UA_StatusCode ServerNetworkLayerTCP_start(ServerNetworkLayerTCP *layer) {
 #ifdef _WIN32
 	WORD wVersionRequested;
 	WSADATA wsaData;
@@ -285,7 +301,7 @@ static UA_StatusCode NetworkLayerTCP_start(NetworkLayerTCP *layer) {
     return UA_STATUSCODE_GOOD;
 }
 
-static UA_Int32 NetworkLayerTCP_getWork(NetworkLayerTCP *layer, UA_WorkItem **workItems,
+static UA_Int32 ServerNetworkLayerTCP_getWork(ServerNetworkLayerTCP *layer, UA_WorkItem **workItems,
                                         UA_UInt16 timeout) {
     UA_WorkItem *items = UA_NULL;
     UA_Int32 itemsCount = batchDeleteLinks(layer, &items);
@@ -305,7 +321,7 @@ static UA_Int32 NetworkLayerTCP_getWork(NetworkLayerTCP *layer, UA_WorkItem **wo
 		socklen_t cli_len = sizeof(cli_addr);
 		int newsockfd = accept(layer->serversockfd, (struct sockaddr *) &cli_addr, &cli_len);
 		if (newsockfd >= 0)
-			NetworkLayerTCP_add(layer, newsockfd);
+			ServerNetworkLayerTCP_add(layer, newsockfd);
 	}
     
     items = realloc(items, sizeof(UA_WorkItem)*(itemsCount+resultsize));
@@ -351,7 +367,7 @@ static UA_Int32 NetworkLayerTCP_getWork(NetworkLayerTCP *layer, UA_WorkItem **wo
     return j;
 }
 
-static UA_Int32 NetworkLayerTCP_stop(NetworkLayerTCP * layer, UA_WorkItem **workItems) {
+static UA_Int32 ServerNetworkLayerTCP_stop(ServerNetworkLayerTCP * layer, UA_WorkItem **workItems) {
 	for(UA_Int32 index = 0;index < layer->conLinksSize;index++)
         closeConnection(layer->conLinks[index].connection);
 #ifdef _WIN32
@@ -360,24 +376,144 @@ static UA_Int32 NetworkLayerTCP_stop(NetworkLayerTCP * layer, UA_WorkItem **work
     return batchDeleteLinks(layer, workItems);
 }
 
-static void NetworkLayerTCP_delete(NetworkLayerTCP *layer) {
+static void ServerNetworkLayerTCP_delete(ServerNetworkLayerTCP *layer) {
 	free(layer->conLinks);
 	free(layer);
 }
 
-UA_NetworkLayer NetworkLayerTCP_new(UA_ConnectionConfig conf, UA_UInt32 port) {
-    NetworkLayerTCP *tcplayer = malloc(sizeof(NetworkLayerTCP));
+UA_ServerNetworkLayer ServerNetworkLayerTCP_new(UA_ConnectionConfig conf, UA_UInt32 port) {
+    ServerNetworkLayerTCP *tcplayer = malloc(sizeof(ServerNetworkLayerTCP));
 	tcplayer->conf = conf;
 	tcplayer->conLinksSize = 0;
 	tcplayer->conLinks = NULL;
     tcplayer->port = port;
     tcplayer->deleteLinkList = UA_NULL;
 
-    UA_NetworkLayer nl;
+    UA_ServerNetworkLayer nl;
     nl.nlHandle = tcplayer;
-    nl.start = (UA_StatusCode (*)(void*))NetworkLayerTCP_start;
-    nl.getWork = (UA_Int32 (*)(void*, UA_WorkItem**, UA_UInt16)) NetworkLayerTCP_getWork;
-    nl.stop = (UA_Int32 (*)(void*, UA_WorkItem**)) NetworkLayerTCP_stop;
-    nl.delete = (void (*)(void*))NetworkLayerTCP_delete;
+    nl.start = (UA_StatusCode (*)(void*))ServerNetworkLayerTCP_start;
+    nl.getWork = (UA_Int32 (*)(void*, UA_WorkItem**, UA_UInt16))ServerNetworkLayerTCP_getWork;
+    nl.stop = (UA_Int32 (*)(void*, UA_WorkItem**))ServerNetworkLayerTCP_stop;
+    nl.free = (void (*)(void*))ServerNetworkLayerTCP_delete;
     return nl;
 }
+
+/***************************/
+/* Client NetworkLayer TCP */
+/***************************/
+
+static UA_StatusCode ClientNetworkLayerTCP_connect(const UA_String endpointUrl, void **resultHandle) { 
+    if(endpointUrl.length < 11 || endpointUrl.length >= 512) {
+        printf("server url size invalid");
+        return UA_STATUSCODE_BADINTERNALERROR;
+    }
+
+    if(strncmp((char*)endpointUrl.data, "opc.tcp://", 10) != 0) {
+        printf("server url does not begin with opc.tcp://");
+        return UA_STATUSCODE_BADINTERNALERROR;
+    }
+
+    UA_UInt16 portpos = 9;
+    UA_UInt16 port = 0;
+    for(;portpos < endpointUrl.length; portpos++) {
+        if(endpointUrl.data[portpos] == ':') {
+            port = atoi((char*)&endpointUrl.data[portpos+1]);
+            break;
+        }
+    }
+    if(port == 0) {
+        printf("port invalid");
+        return UA_STATUSCODE_BADINTERNALERROR;
+    }
+    
+    char hostname[512];
+    for(int i=10; i < portpos; i++)
+        hostname[i-10] = endpointUrl.data[i];
+    hostname[portpos-10] = 0;
+
+    UA_Int32 *sock = UA_Int32_new();
+    if(!sock)
+        return UA_STATUSCODE_BADOUTOFMEMORY;
+    if((*sock = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
+        free(sock);
+		printf("Could not create socket");
+        return UA_STATUSCODE_BADINTERNALERROR;
+    }
+
+	struct sockaddr_in server;
+	server.sin_addr.s_addr = inet_addr(hostname);
+	server.sin_family = AF_INET;
+	server.sin_port = port;
+
+	if(connect(*sock, (struct sockaddr *) &server, sizeof(server)) < 0) {
+        free(sock);
+        printf("Connect failed.");
+        return UA_STATUSCODE_BADINTERNALERROR;
+    }
+
+    if(setNonBlocking(*sock) != UA_STATUSCODE_GOOD) {
+        free(sock);
+        printf("Could not switch to nonblocking.");
+        return UA_STATUSCODE_BADINTERNALERROR;
+    }
+
+    *resultHandle = sock;
+    return UA_STATUSCODE_GOOD;
+}
+
+static void ClientNetworkLayerTCP_disconnect(UA_Int32 *handle) {
+    close(*handle);
+    free(handle);
+}
+
+static UA_StatusCode ClientNetworkLayerTCP_send(UA_Int32 *handle, UA_ByteStringArray gather_buf) {
+	struct iovec iov[gather_buf.stringsSize];
+    int total_len = 0;
+	for(UA_UInt32 i=0;i<gather_buf.stringsSize;i++) {
+		iov[i] = (struct iovec) {.iov_base = gather_buf.strings[i].data,
+                                 .iov_len = gather_buf.strings[i].length};
+		total_len += gather_buf.strings[i].length;
+	}
+	struct msghdr message = {.msg_name = NULL, .msg_namelen = 0, .msg_iov = iov,
+							 .msg_iovlen = gather_buf.stringsSize, .msg_control = NULL,
+							 .msg_controllen = 0, .msg_flags = 0};
+    int nWritten = 0;
+	while (nWritten < total_len) {
+        int n = sendmsg(*handle, &message, 0);
+        if(n <= -1)
+            return UA_STATUSCODE_BADINTERNALERROR;
+        nWritten += n;
+	}
+    return UA_STATUSCODE_GOOD;
+}
+
+static UA_StatusCode ClientNetworkLayerTCP_awaitResponse(UA_Int32 *handle, UA_ByteString *response,
+                                                         UA_UInt32 timeout) {
+    fd_set read_fds;
+    FD_ZERO(&read_fds);
+    struct timeval tmptv = {0, timeout};
+    int ret = select(*handle+1, &read_fds, NULL, NULL, &tmptv);
+    if(ret <= -1)
+        return UA_STATUSCODE_BADINTERNALERROR;
+    if(ret == 0)
+        return UA_STATUSCODE_BADTIMEOUT;
+
+    ret = recv(*handle, response->data, response->length, 0);
+
+    if(ret <= -1)
+        return UA_STATUSCODE_BADINTERNALERROR;
+    if(ret == 0)
+        return UA_STATUSCODE_BADSERVERNOTCONNECTED;
+
+    response->length = ret;
+    return UA_STATUSCODE_GOOD;
+}
+
+UA_ClientNetworkLayer ClientNetworkLayerTCP_new(UA_ConnectionConfig conf) {
+    UA_ClientNetworkLayer layer;
+    layer.connect = (UA_StatusCode (*)(const UA_String, void**)) ClientNetworkLayerTCP_connect;
+    layer.disconnect = (void (*)(void*)) ClientNetworkLayerTCP_disconnect;
+    layer.send = (UA_StatusCode (*)(void*, UA_ByteStringArray)) ClientNetworkLayerTCP_send;
+    layer.awaitResponse = (UA_StatusCode (*)(void*, UA_ByteString *, UA_UInt32))ClientNetworkLayerTCP_awaitResponse;
+    return layer;
+}
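A minimal usage sketch (not part of the commit) of how the new client-side functions above fit together when the UA_ClientNetworkLayer function pointers are used directly. It assumes an endpoint of the form opc.tcp://<ip>:<port> (the connect code resolves the host with inet_addr, so a dotted IP is expected), that request/response buffers are prepared by the caller, and that UA_ConnectionConfig_standard is the library's default configuration; the timeout is handed to select() as microseconds.

```c
/* Hedged sketch: drive the new TCP client networklayer directly.
 * `request` and `response` are assumed to be allocated by the caller. */
#include "networklayer_tcp.h"

static UA_StatusCode roundtrip(const UA_String endpointUrl, UA_ByteStringArray request,
                               UA_ByteString *response) {
    UA_ClientNetworkLayer nl = ClientNetworkLayerTCP_new(UA_ConnectionConfig_standard);
    void *handle = UA_NULL;
    UA_StatusCode retval = nl.connect(endpointUrl, &handle);  /* opens a nonblocking socket */
    if(retval != UA_STATUSCODE_GOOD)
        return retval;
    retval = nl.send(handle, request);                        /* scatter-gather sendmsg */
    if(retval == UA_STATUSCODE_GOOD)
        retval = nl.awaitResponse(handle, response, 10000);   /* timeout in microseconds */
    nl.disconnect(handle);                                    /* close() and free the handle */
    return retval;
}
```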

+ 3 - 1
examples/networklayer_tcp.h

@@ -11,9 +11,11 @@ extern "C" {
 #endif
 
 #include "ua_server.h"
+#include "ua_client.h"
 
 /** @brief Create the TCP networklayer and listen to the specified port */
-UA_NetworkLayer NetworkLayerTCP_new(UA_ConnectionConfig conf, UA_UInt32 port);
+UA_ServerNetworkLayer ServerNetworkLayerTCP_new(UA_ConnectionConfig conf, UA_UInt32 port);
+UA_ClientNetworkLayer ClientNetworkLayerTCP_new(UA_ConnectionConfig conf);
 
 #ifdef __cplusplus
 } // extern "C"

+ 13 - 4
examples/networklayer_udp.c

@@ -8,7 +8,7 @@
 #include <malloc.h>
 #include <winsock2.h>
 #include <sys/types.h>
-#include <Windows.h>
+#include <windows.h>
 #include <ws2tcpip.h>
 #define CLOSESOCKET(S) closesocket(S)
 #else
@@ -48,7 +48,11 @@ typedef struct {
 typedef struct NetworkLayerUDP {
 	UA_ConnectionConfig conf;
 	fd_set fdset;
+#ifdef _WIN32
+	UA_UInt32 serversockfd;
+#else
 	UA_Int32 serversockfd;
+#endif
     UA_UInt32 port;
 } NetworkLayerUDP;
 
@@ -81,24 +85,29 @@ void writeCallbackUDP(UDPConnection *handle, UA_ByteStringArray gather_buf);
 void writeCallbackUDP(UDPConnection *handle, UA_ByteStringArray gather_buf) {
 	UA_UInt32 total_len = 0, nWritten = 0;
 #ifdef _WIN32
+	/*
 	LPWSABUF buf = _alloca(gather_buf.stringsSize * sizeof(WSABUF));
 	int result = 0;
 	for(UA_UInt32 i = 0; i<gather_buf.stringsSize; i++) {
-		buf[i].buf = gather_buf.strings[i].data;
+		buf[i].buf = (char*)gather_buf.strings[i].data;
 		buf[i].len = gather_buf.strings[i].length;
 		total_len += gather_buf.strings[i].length;
 	}
 	while(nWritten < total_len) {
 		UA_UInt32 n = 0;
 		do {
-			result = WSASendto(handle->sockfd, buf, gather_buf.stringsSize ,
-                             (LPDWORD)&n, 0, NULL, NULL);
+			result = WSASendto(handle->layer->serversockfd, buf, gather_buf.stringsSize ,
+                             (LPDWORD)&n, 0, 
+                             handle->from, handle->fromlen,
+                             NULL, NULL);
 			//FIXME:
 			if(result != 0)
 				printf("Error WSASend, code: %d \n", WSAGetLastError());
 		} while(errno == EINTR);
 		nWritten += n;
 	}
+	*/
+	#error fixme: udp not yet implemented for windows
 #else
 	struct iovec iov[gather_buf.stringsSize];
 	for(UA_UInt32 i=0;i<gather_buf.stringsSize;i++) {

+ 1 - 1
examples/opcuaServer.c

@@ -67,7 +67,7 @@ int main(int argc, char** argv) {
 #ifdef EXTENSION_UDP
     UA_Server_addNetworkLayer(server, NetworkLayerUDP_new(UA_ConnectionConfig_standard, 16664));
 #else
-    UA_Server_addNetworkLayer(server, NetworkLayerTCP_new(UA_ConnectionConfig_standard, 16664));
+    UA_Server_addNetworkLayer(server, ServerNetworkLayerTCP_new(UA_ConnectionConfig_standard, 16664));
 #endif
 
 

+ 655 - 0
include/queue.h

@@ -0,0 +1,655 @@
+/*	$OpenBSD: queue.h,v 1.38 2013/07/03 15:05:21 fgsch Exp $	*/
+/*	$NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $	*/
+
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef	_SYS_QUEUE_H_
+#define	_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists, 
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
+#define _Q_INVALIDATE(a) (a) = ((void *)-1)
+#else
+#define _Q_INVALIDATE(a)
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type)						\
+struct name {								\
+	struct type *slh_first;	/* first element */			\
+}
+ 
+#define	SLIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+ 
+
+/*changes by Stasik0 to avoid conflicts with winnt.h*/
+#ifdef SLIST_ENTRY
+#undef SLIST_ENTRY
+#endif
+/**/
+
+#define SLIST_ENTRY(type)						\
+struct {								\
+	struct type *sle_next;	/* next element */			\
+}
+ 
+/*
+ * Singly-linked List access methods.
+ */
+#define	SLIST_FIRST(head)	((head)->slh_first)
+#define	SLIST_END(head)		NULL
+#define	SLIST_EMPTY(head)	(SLIST_FIRST(head) == SLIST_END(head))
+#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+
+#define	SLIST_FOREACH(var, head, field)					\
+	for((var) = SLIST_FIRST(head);					\
+	    (var) != SLIST_END(head);					\
+	    (var) = SLIST_NEXT(var, field))
+
+#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = SLIST_FIRST(head);				\
+	    (var) && ((tvar) = SLIST_NEXT(var, field), 1);		\
+	    (var) = (tvar))
+
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_INIT(head) {						\
+	SLIST_FIRST(head) = SLIST_END(head);				\
+}
+
+#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
+	(slistelm)->field.sle_next = (elm);				\
+} while (0)
+
+#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+	(elm)->field.sle_next = (head)->slh_first;			\
+	(head)->slh_first = (elm);					\
+} while (0)
+
+#define	SLIST_REMOVE_AFTER(elm, field) do {				\
+	(elm)->field.sle_next = (elm)->field.sle_next->field.sle_next;	\
+} while (0)
+
+#define	SLIST_REMOVE_HEAD(head, field) do {				\
+	(head)->slh_first = (head)->slh_first->field.sle_next;		\
+} while (0)
+
+#define SLIST_REMOVE(head, elm, type, field) do {			\
+	if ((head)->slh_first == (elm)) {				\
+		SLIST_REMOVE_HEAD((head), field);			\
+	} else {							\
+		struct type *curelm = (head)->slh_first;		\
+									\
+		while (curelm->field.sle_next != (elm))			\
+			curelm = curelm->field.sle_next;		\
+		curelm->field.sle_next =				\
+		    curelm->field.sle_next->field.sle_next;		\
+		_Q_INVALIDATE((elm)->field.sle_next);			\
+	}								\
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type)						\
+struct name {								\
+	struct type *lh_first;	/* first element */			\
+}
+
+#define LIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define LIST_ENTRY(type)						\
+struct {								\
+	struct type *le_next;	/* next element */			\
+	struct type **le_prev;	/* address of previous next element */	\
+}
+
+/*
+ * List access methods
+ */
+#define	LIST_FIRST(head)		((head)->lh_first)
+#define	LIST_END(head)			NULL
+#define	LIST_EMPTY(head)		(LIST_FIRST(head) == LIST_END(head))
+#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field)					\
+	for((var) = LIST_FIRST(head);					\
+	    (var)!= LIST_END(head);					\
+	    (var) = LIST_NEXT(var, field))
+
+#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = LIST_FIRST(head);				\
+	    (var) && ((tvar) = LIST_NEXT(var, field), 1);		\
+	    (var) = (tvar))
+
+/*
+ * List functions.
+ */
+#define	LIST_INIT(head) do {						\
+	LIST_FIRST(head) = LIST_END(head);				\
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do {			\
+	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
+		(listelm)->field.le_next->field.le_prev =		\
+		    &(elm)->field.le_next;				\
+	(listelm)->field.le_next = (elm);				\
+	(elm)->field.le_prev = &(listelm)->field.le_next;		\
+} while (0)
+
+#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.le_prev = (listelm)->field.le_prev;		\
+	(elm)->field.le_next = (listelm);				\
+	*(listelm)->field.le_prev = (elm);				\
+	(listelm)->field.le_prev = &(elm)->field.le_next;		\
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do {				\
+	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
+		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+	(head)->lh_first = (elm);					\
+	(elm)->field.le_prev = &(head)->lh_first;			\
+} while (0)
+
+#define LIST_REMOVE(elm, field) do {					\
+	if ((elm)->field.le_next != NULL)				\
+		(elm)->field.le_next->field.le_prev =			\
+		    (elm)->field.le_prev;				\
+	*(elm)->field.le_prev = (elm)->field.le_next;			\
+	_Q_INVALIDATE((elm)->field.le_prev);				\
+	_Q_INVALIDATE((elm)->field.le_next);				\
+} while (0)
+
+#define LIST_REPLACE(elm, elm2, field) do {				\
+	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL)	\
+		(elm2)->field.le_next->field.le_prev =			\
+		    &(elm2)->field.le_next;				\
+	(elm2)->field.le_prev = (elm)->field.le_prev;			\
+	*(elm2)->field.le_prev = (elm2);				\
+	_Q_INVALIDATE((elm)->field.le_prev);				\
+	_Q_INVALIDATE((elm)->field.le_next);				\
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *sqh_first;	/* first element */			\
+	struct type **sqh_last;	/* addr of last next element */		\
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type)						\
+struct {								\
+	struct type *sqe_next;	/* next element */			\
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define	SIMPLEQ_FIRST(head)	    ((head)->sqh_first)
+#define	SIMPLEQ_END(head)	    NULL
+#define	SIMPLEQ_EMPTY(head)	    (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#define	SIMPLEQ_NEXT(elm, field)    ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field)				\
+	for((var) = SIMPLEQ_FIRST(head);				\
+	    (var) != SIMPLEQ_END(head);					\
+	    (var) = SIMPLEQ_NEXT(var, field))
+
+#define	SIMPLEQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = SIMPLEQ_FIRST(head);				\
+	    (var) && ((tvar) = SIMPLEQ_NEXT(var, field), 1);		\
+	    (var) = (tvar))
+
+/*
+ * Simple queue functions.
+ */
+#define	SIMPLEQ_INIT(head) do {						\
+	(head)->sqh_first = NULL;					\
+	(head)->sqh_last = &(head)->sqh_first;				\
+} while (0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(head)->sqh_first = (elm);					\
+} while (0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.sqe_next = NULL;					\
+	*(head)->sqh_last = (elm);					\
+	(head)->sqh_last = &(elm)->field.sqe_next;			\
+} while (0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(listelm)->field.sqe_next = (elm);				\
+} while (0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, field) do {			\
+	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+		(head)->sqh_last = &(head)->sqh_first;			\
+} while (0)
+
+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
+	    == NULL)							\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+} while (0)
+
+/*
+ * XOR Simple queue definitions.
+ */
+#define XSIMPLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *sqx_first;	/* first element */			\
+	struct type **sqx_last;	/* addr of last next element */		\
+	unsigned long sqx_cookie;					\
+}
+
+#define XSIMPLEQ_ENTRY(type)						\
+struct {								\
+	struct type *sqx_next;	/* next element */			\
+}
+
+/*
+ * XOR Simple queue access methods.
+ */
+#define XSIMPLEQ_XOR(head, ptr)	    ((__typeof(ptr))((head)->sqx_cookie ^ \
+					(unsigned long)(ptr)))
+#define	XSIMPLEQ_FIRST(head)	    XSIMPLEQ_XOR(head, ((head)->sqx_first))
+#define	XSIMPLEQ_END(head)	    NULL
+#define	XSIMPLEQ_EMPTY(head)	    (XSIMPLEQ_FIRST(head) == XSIMPLEQ_END(head))
+#define	XSIMPLEQ_NEXT(head, elm, field)    XSIMPLEQ_XOR(head, ((elm)->field.sqx_next))
+
+
+#define XSIMPLEQ_FOREACH(var, head, field)				\
+	for ((var) = XSIMPLEQ_FIRST(head);				\
+	    (var) != XSIMPLEQ_END(head);				\
+	    (var) = XSIMPLEQ_NEXT(head, var, field))
+
+#define	XSIMPLEQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = XSIMPLEQ_FIRST(head);				\
+	    (var) && ((tvar) = XSIMPLEQ_NEXT(head, var, field), 1);	\
+	    (var) = (tvar))
+
+/*
+ * XOR Simple queue functions.
+ */
+#define	XSIMPLEQ_INIT(head) do {					\
+	arc4random_buf(&(head)->sqx_cookie, sizeof((head)->sqx_cookie)); \
+	(head)->sqx_first = XSIMPLEQ_XOR(head, NULL);			\
+	(head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first);	\
+} while (0)
+
+#define XSIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.sqx_next = (head)->sqx_first) ==		\
+	    XSIMPLEQ_XOR(head, NULL))					\
+		(head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
+	(head)->sqx_first = XSIMPLEQ_XOR(head, (elm));			\
+} while (0)
+
+#define XSIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.sqx_next = XSIMPLEQ_XOR(head, NULL);		\
+	*(XSIMPLEQ_XOR(head, (head)->sqx_last)) = XSIMPLEQ_XOR(head, (elm)); \
+	(head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next);	\
+} while (0)
+
+#define XSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.sqx_next = (listelm)->field.sqx_next) ==	\
+	    XSIMPLEQ_XOR(head, NULL))					\
+		(head)->sqx_last = XSIMPLEQ_XOR(head, &(elm)->field.sqx_next); \
+	(listelm)->field.sqx_next = XSIMPLEQ_XOR(head, (elm));		\
+} while (0)
+
+#define XSIMPLEQ_REMOVE_HEAD(head, field) do {				\
+	if (((head)->sqx_first = XSIMPLEQ_XOR(head,			\
+	    (head)->sqx_first)->field.sqx_next) == XSIMPLEQ_XOR(head, NULL)) \
+		(head)->sqx_last = XSIMPLEQ_XOR(head, &(head)->sqx_first); \
+} while (0)
+
+#define XSIMPLEQ_REMOVE_AFTER(head, elm, field) do {			\
+	if (((elm)->field.sqx_next = XSIMPLEQ_XOR(head,			\
+	    (elm)->field.sqx_next)->field.sqx_next)			\
+	    == XSIMPLEQ_XOR(head, NULL))				\
+		(head)->sqx_last = 					\
+		    XSIMPLEQ_XOR(head, &(elm)->field.sqx_next);		\
+} while (0)
+
+		    
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *tqh_first;	/* first element */			\
+	struct type **tqh_last;	/* addr of last next element */		\
+}
+
+#define TAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type)						\
+struct {								\
+	struct type *tqe_next;	/* next element */			\
+	struct type **tqe_prev;	/* address of previous next element */	\
+}
+
+/* 
+ * tail queue access methods 
+ */
+#define	TAILQ_FIRST(head)		((head)->tqh_first)
+#define	TAILQ_END(head)			NULL
+#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname)					\
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define TAILQ_PREV(elm, headname, field)				\
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define	TAILQ_EMPTY(head)						\
+	(TAILQ_FIRST(head) == TAILQ_END(head))
+
+#define TAILQ_FOREACH(var, head, field)					\
+	for((var) = TAILQ_FIRST(head);					\
+	    (var) != TAILQ_END(head);					\
+	    (var) = TAILQ_NEXT(var, field))
+
+#define	TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = TAILQ_FIRST(head);					\
+	    (var) != TAILQ_END(head) &&					\
+	    ((tvar) = TAILQ_NEXT(var, field), 1);			\
+	    (var) = (tvar))
+
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
+	for((var) = TAILQ_LAST(head, headname);				\
+	    (var) != TAILQ_END(head);					\
+	    (var) = TAILQ_PREV(var, headname, field))
+
+#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
+	for ((var) = TAILQ_LAST(head, headname);			\
+	    (var) != TAILQ_END(head) &&					\
+	    ((tvar) = TAILQ_PREV(var, headname, field), 1);		\
+	    (var) = (tvar))
+
+/*
+ * Tail queue functions.
+ */
+#define	TAILQ_INIT(head) do {						\
+	(head)->tqh_first = NULL;					\
+	(head)->tqh_last = &(head)->tqh_first;				\
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
+		(head)->tqh_first->field.tqe_prev =			\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(head)->tqh_first = (elm);					\
+	(elm)->field.tqe_prev = &(head)->tqh_first;			\
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.tqe_next = NULL;					\
+	(elm)->field.tqe_prev = (head)->tqh_last;			\
+	*(head)->tqh_last = (elm);					\
+	(head)->tqh_last = &(elm)->field.tqe_next;			\
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+		(elm)->field.tqe_next->field.tqe_prev =			\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(listelm)->field.tqe_next = (elm);				\
+	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
+} while (0)
+
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	(elm)->field.tqe_next = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do {				\
+	if (((elm)->field.tqe_next) != NULL)				\
+		(elm)->field.tqe_next->field.tqe_prev =			\
+		    (elm)->field.tqe_prev;				\
+	else								\
+		(head)->tqh_last = (elm)->field.tqe_prev;		\
+	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
+	_Q_INVALIDATE((elm)->field.tqe_prev);				\
+	_Q_INVALIDATE((elm)->field.tqe_next);				\
+} while (0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do {			\
+	if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL)	\
+		(elm2)->field.tqe_next->field.tqe_prev =		\
+		    &(elm2)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm2)->field.tqe_next;		\
+	(elm2)->field.tqe_prev = (elm)->field.tqe_prev;			\
+	*(elm2)->field.tqe_prev = (elm2);				\
+	_Q_INVALIDATE((elm)->field.tqe_prev);				\
+	_Q_INVALIDATE((elm)->field.tqe_next);				\
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *cqh_first;		/* first element */		\
+	struct type *cqh_last;		/* last element */		\
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head)					\
+	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type)						\
+struct {								\
+	struct type *cqe_next;		/* next element */		\
+	struct type *cqe_prev;		/* previous element */		\
+}
+
+/*
+ * Circular queue access methods 
+ */
+#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
+#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
+#define	CIRCLEQ_END(head)		((void *)(head))
+#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
+#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
+#define	CIRCLEQ_EMPTY(head)						\
+	(CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
+
+#define CIRCLEQ_FOREACH(var, head, field)				\
+	for((var) = CIRCLEQ_FIRST(head);				\
+	    (var) != CIRCLEQ_END(head);					\
+	    (var) = CIRCLEQ_NEXT(var, field))
+
+#define	CIRCLEQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = CIRCLEQ_FIRST(head);				\
+	    (var) != CIRCLEQ_END(head) &&				\
+	    ((tvar) = CIRCLEQ_NEXT(var, field), 1);			\
+	    (var) = (tvar))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
+	for((var) = CIRCLEQ_LAST(head);					\
+	    (var) != CIRCLEQ_END(head);					\
+	    (var) = CIRCLEQ_PREV(var, field))
+
+#define	CIRCLEQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
+	for ((var) = CIRCLEQ_LAST(head, headname);			\
+	    (var) != CIRCLEQ_END(head) && 				\
+	    ((tvar) = CIRCLEQ_PREV(var, headname, field), 1);		\
+	    (var) = (tvar))
+
+/*
+ * Circular queue functions.
+ */
+#define	CIRCLEQ_INIT(head) do {						\
+	(head)->cqh_first = CIRCLEQ_END(head);				\
+	(head)->cqh_last = CIRCLEQ_END(head);				\
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
+	(elm)->field.cqe_prev = (listelm);				\
+	if ((listelm)->field.cqe_next == CIRCLEQ_END(head))		\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
+	(listelm)->field.cqe_next = (elm);				\
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
+	(elm)->field.cqe_next = (listelm);				\
+	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
+	if ((listelm)->field.cqe_prev == CIRCLEQ_END(head))		\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
+	(listelm)->field.cqe_prev = (elm);				\
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
+	(elm)->field.cqe_next = (head)->cqh_first;			\
+	(elm)->field.cqe_prev = CIRCLEQ_END(head);			\
+	if ((head)->cqh_last == CIRCLEQ_END(head))			\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(head)->cqh_first->field.cqe_prev = (elm);		\
+	(head)->cqh_first = (elm);					\
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.cqe_next = CIRCLEQ_END(head);			\
+	(elm)->field.cqe_prev = (head)->cqh_last;			\
+	if ((head)->cqh_first == CIRCLEQ_END(head))			\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(head)->cqh_last->field.cqe_next = (elm);		\
+	(head)->cqh_last = (elm);					\
+} while (0)
+
+#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
+	if ((elm)->field.cqe_next == CIRCLEQ_END(head))			\
+		(head)->cqh_last = (elm)->field.cqe_prev;		\
+	else								\
+		(elm)->field.cqe_next->field.cqe_prev =			\
+		    (elm)->field.cqe_prev;				\
+	if ((elm)->field.cqe_prev == CIRCLEQ_END(head))			\
+		(head)->cqh_first = (elm)->field.cqe_next;		\
+	else								\
+		(elm)->field.cqe_prev->field.cqe_next =			\
+		    (elm)->field.cqe_next;				\
+	_Q_INVALIDATE((elm)->field.cqe_prev);				\
+	_Q_INVALIDATE((elm)->field.cqe_next);				\
+} while (0)
+
+#define CIRCLEQ_REPLACE(head, elm, elm2, field) do {			\
+	if (((elm2)->field.cqe_next = (elm)->field.cqe_next) ==		\
+	    CIRCLEQ_END(head))						\
+		(head)->cqh_last = (elm2);				\
+	else								\
+		(elm2)->field.cqe_next->field.cqe_prev = (elm2);	\
+	if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) ==		\
+	    CIRCLEQ_END(head))						\
+		(head)->cqh_first = (elm2);				\
+	else								\
+		(elm2)->field.cqe_prev->field.cqe_next = (elm2);	\
+	_Q_INVALIDATE((elm)->field.cqe_prev);				\
+	_Q_INVALIDATE((elm)->field.cqe_next);				\
+} while (0)
+
+#endif	/* !_SYS_QUEUE_H_ */
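The vendored queue.h is macro-only, so a short sketch (not part of the commit; the struct and field names are purely illustrative) may help show how the doubly-linked LIST_* macros defined above are meant to be used.

```c
/* Hedged sketch: a small intrusive list built with the LIST_* macros. */
#include <stdio.h>
#include <stdlib.h>
#include "queue.h"

struct entry {
    int value;
    LIST_ENTRY(entry) pointers;   /* embeds le_next / le_prev */
};

LIST_HEAD(entry_list, entry);     /* declares struct entry_list */

int main(void) {
    struct entry_list head;
    LIST_INIT(&head);

    for(int i = 0; i < 3; i++) {
        struct entry *e = malloc(sizeof(struct entry));
        e->value = i;
        LIST_INSERT_HEAD(&head, e, pointers);
    }

    /* the _SAFE variant allows removal while iterating */
    struct entry *e, *tmp;
    LIST_FOREACH_SAFE(e, &head, pointers, tmp) {
        printf("%d\n", e->value);
        LIST_REMOVE(e, pointers);
        free(e);
    }
    return 0;
}
```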

+ 26 - 0
include/ua_client.h

@@ -0,0 +1,26 @@
+#include "ua_util.h"
+#include "ua_types.h"
+#include "ua_connection.h"
+#include "ua_transport_generated.h"
+#include "ua_namespace_0.h"
+
+/**
+ * The client networklayer can handle only a single connection. The networklayer
+ * is only concerned with getting messages to the client and receiving them.
+ */
+typedef struct {
+    UA_StatusCode (*connect)(const UA_String endpointUrl, void **resultHandle);
+    void (*disconnect)(void *handle);
+    UA_StatusCode (*send)(void *handle, UA_ByteStringArray gather_buf);
+    // the response buffer exists on the heap. the size shall correspond to the connection settings
+    UA_StatusCode (*awaitResponse)(void *handle, UA_ByteString *response, UA_UInt32 timeout);
+} UA_ClientNetworkLayer;
+
+struct UA_Client;
+typedef struct UA_Client UA_Client;
+
+UA_Client UA_EXPORT * UA_Client_new(void);
+UA_Client UA_EXPORT * setUserDataPtr(UA_Client *c, void *userData);
+UA_StatusCode UA_EXPORT UA_Client_connect(UA_Client *c, UA_ConnectionConfig conf,
+                                          UA_ClientNetworkLayer networkLayer, char *endpointUrl);
+UA_StatusCode UA_EXPORT UA_Client_disconnect(UA_Client *c);
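A minimal sketch (not part of the commit) of how the new client API is presumably meant to be wired up with the TCP networklayer from examples/networklayer_tcp.h; the endpoint URL and port are illustrative, and UA_ConnectionConfig_standard is assumed to be the library's default connection configuration.

```c
/* Hedged sketch: hand a ClientNetworkLayerTCP to the new UA_Client API. */
#include "ua_client.h"
#include "networklayer_tcp.h"

int main(void) {
    UA_Client *client = UA_Client_new();
    UA_ClientNetworkLayer nl = ClientNetworkLayerTCP_new(UA_ConnectionConfig_standard);
    /* the client takes over the networklayer and connects to the endpoint */
    UA_StatusCode retval = UA_Client_connect(client, UA_ConnectionConfig_standard, nl,
                                             "opc.tcp://127.0.0.1:16664");
    if(retval == UA_STATUSCODE_GOOD)
        retval = UA_Client_disconnect(client);
    return retval == UA_STATUSCODE_GOOD ? 0 : 1;
}
```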

+ 3 - 3
include/ua_server.h

@@ -183,15 +183,15 @@ typedef struct {
     UA_Int32 (*stop)(void *nlhandle, UA_WorkItem **workItems);
 
     /** Deletes the network layer. Call only after a successfull shutdown. */
-    void (*delete)(void *nlhandle);
-} UA_NetworkLayer;
+    void (*free)(void *nlhandle);
+} UA_ServerNetworkLayer;
 
 /**
  * Adds a network layer to the server. The network layer is destroyed together
  * with the server. Do not use it after adding it as it might be moved around on
  * the heap.
  */
-void UA_EXPORT UA_Server_addNetworkLayer(UA_Server *server, UA_NetworkLayer networkLayer);
+void UA_EXPORT UA_Server_addNetworkLayer(UA_Server *server, UA_ServerNetworkLayer networkLayer);
 
 /** @} */
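For anyone providing their own network layer, the rename from UA_NetworkLayer to UA_ServerNetworkLayer (and of the delete member to free) only changes the wiring. A hedged sketch follows, assuming the members not shown in this hunk (nlHandle, start, getWork) have the signatures used by the casts in examples/networklayer_tcp.c; the no-op layer itself is illustrative.

```c
/* Hedged sketch: a do-nothing server network layer, to show the struct members only. */
#include "ua_server.h"

static UA_StatusCode noop_start(void *nlhandle) { return UA_STATUSCODE_GOOD; }
static UA_Int32 noop_getWork(void *nlhandle, UA_WorkItem **workItems, UA_UInt16 timeout) {
    *workItems = UA_NULL;
    return 0;
}
static UA_Int32 noop_stop(void *nlhandle, UA_WorkItem **workItems) {
    *workItems = UA_NULL;
    return 0;
}
static void noop_free(void *nlhandle) { }

static UA_ServerNetworkLayer NoopNetworkLayer_new(void) {
    UA_ServerNetworkLayer nl;
    nl.nlHandle = UA_NULL;
    nl.start = noop_start;
    nl.getWork = noop_getWork;
    nl.stop = noop_stop;
    nl.free = noop_free;   /* formerly nl.delete */
    return nl;
}
```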
 

+ 14 - 25
ports/WAGO-750-860.patch

@@ -1,14 +1,18 @@
-commit b801b9275cb93f2cea1dce231f01780bed9834ca
-Author: FlorianPalm <f.palm@plt.rwth-aachen.de>
-Date:   Wed Dec 10 10:21:44 2014 +0100
+From 97515341454f26fcd14e50d5980f5f0361307814 Mon Sep 17 00:00:00 2001
+From: FlorianPalm <f.palm@plt.rwth-aachen.de>
+Date: Mon, 12 Jan 2015 18:45:01 +0100
+Subject: [PATCH] fixes to the patch
 
-    changes for WAGO 750-860
+---
+ CMakeLists.txt | 19 ++++++++-----------
+ src/ua_util.h  |  2 +-
+ 2 files changed, 9 insertions(+), 12 deletions(-)
 
 diff --git a/CMakeLists.txt b/CMakeLists.txt
-index 87489b3..76c4aa0 100644
+index c785182..9e66da0 100644
 --- a/CMakeLists.txt
 +++ b/CMakeLists.txt
-@@ -41,19 +41,16 @@ set(lib_sources src/ua_types.c
+@@ -47,19 +47,16 @@ set(lib_sources src/ua_types.c
  
  # compiler flags
  if(CMAKE_COMPILER_IS_GNUCC OR "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
@@ -32,7 +36,7 @@ index 87489b3..76c4aa0 100644
  	endif()
  endif()
  
-@@ -115,8 +113,8 @@ else()
+@@ -150,8 +147,8 @@ else()
  endif()
  
  add_library(open62541-objects OBJECT ${lib_sources}) # static version that exports all symbols
@@ -43,21 +47,6 @@ index 87489b3..76c4aa0 100644
  
  ## logging
  set(UA_LOGLEVEL 400 CACHE STRING "Level at which logs shall be reported")
-@@ -137,7 +135,7 @@ endif()
- configure_file("src/ua_config.h.in" "${PROJECT_BINARY_DIR}/src_generated/ua_config.h")
- 
- # download queue.h if required
--if(WIN32)
-+#if(WIN32)
-     if(NOT EXISTS "${PROJECT_BINARY_DIR}/src_generated/queue.h")
-         file(DOWNLOAD "http://openbsd.cs.toronto.edu/cgi-bin/cvsweb/~checkout~/src/sys/sys/queue.h" "${PROJECT_BINARY_DIR}/src_generated/queue.h" STATUS result)
-         list(GET result 0 download_ok)
-@@ -146,7 +144,7 @@ if(WIN32)
-             message(FATAL_ERROR "queue.h could not be downloaded")
-         endif()
-     endif()
--endif(WIN32)
-+#endif(WIN32)
- 
- # generate code from xml definitions
- file(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/src_generated")
+-- 
+2.2.0
+

+ 3 - 3
src/server/ua_server.c

@@ -29,8 +29,8 @@ static void UA_ExternalNamespace_deleteMembers(UA_ExternalNamespace *ens) {
 /* Configuration */
 /*****************/
 
-void UA_Server_addNetworkLayer(UA_Server *server, UA_NetworkLayer networkLayer) {
-    server->nls = UA_realloc(server->nls, sizeof(UA_NetworkLayer)*(server->nlsSize+1));
+void UA_Server_addNetworkLayer(UA_Server *server, UA_ServerNetworkLayer networkLayer) {
+    server->nls = UA_realloc(server->nls, sizeof(UA_ServerNetworkLayer)*(server->nlsSize+1));
     server->nls[server->nlsSize] = networkLayer;
     server->nlsSize++;
 }
@@ -48,7 +48,7 @@ void UA_Server_delete(UA_Server *server) {
 
     // Delete the network layers
     for(UA_Int32 i=0;i<server->nlsSize;i++) {
-        server->nls[i].delete(server->nls[i].nlHandle);
+        server->nls[i].free(server->nls[i].nlHandle);
     }
     UA_free(server->nls);
 

+ 1 - 1
src/server/ua_server_internal.h

@@ -44,7 +44,7 @@ struct UA_Server {
     UA_ExternalNamespace *externalNamespaces;
 
     UA_Int32 nlsSize;
-    UA_NetworkLayer *nls;
+    UA_ServerNetworkLayer *nls;
 
     UA_UInt32 random_seed;
 

+ 1 - 1
src/server/ua_server_worker.c

@@ -418,7 +418,7 @@ UA_StatusCode UA_Server_run(UA_Server *server, UA_UInt16 nThreads, UA_Boolean *r
 
         // 3.2) Get work from the networklayer and dispatch it
         for(UA_Int32 i=0;i<server->nlsSize;i++) {
-            UA_NetworkLayer *nl = &server->nls[i];
+            UA_ServerNetworkLayer *nl = &server->nls[i];
             UA_WorkItem *work;
             UA_Int32 workSize;
             if(*running) {

+ 24 - 24
src/server/ua_services_attribute.c

@@ -256,11 +256,11 @@ void Service_Read(UA_Server *server, UA_Session *session, const UA_ReadRequest *
 #endif
 }
 
-static UA_StatusCode writeValue(UA_Server *server, UA_WriteValue *writeValue) {
+static UA_StatusCode writeValue(UA_Server *server, UA_WriteValue *aWriteValue) {
     UA_StatusCode retval = UA_STATUSCODE_GOOD;
 
     do {
-        const UA_Node *node = UA_NodeStore_get(server->nodestore, &writeValue->nodeId);
+        const UA_Node *node = UA_NodeStore_get(server->nodestore, &aWriteValue->nodeId);
         if(!node)
             return UA_STATUSCODE_BADNODEIDUNKNOWN;
 
@@ -311,39 +311,39 @@ static UA_StatusCode writeValue(UA_Server *server, UA_WriteValue *writeValue) {
             break;
         }
 
-        switch(writeValue->attributeId) {
+        switch(aWriteValue->attributeId) {
         case UA_ATTRIBUTEID_NODEID:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){ } */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){ } */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_NODECLASS:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){ } */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){ } */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_BROWSENAME:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_DISPLAYNAME:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_DESCRIPTION:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_WRITEMASK:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_USERWRITEMASK:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
@@ -352,12 +352,12 @@ static UA_StatusCode writeValue(UA_Server *server, UA_WriteValue *writeValue) {
             break;
 
         case UA_ATTRIBUTEID_SYMMETRIC:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_INVERSENAME:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
@@ -367,7 +367,7 @@ static UA_StatusCode writeValue(UA_Server *server, UA_WriteValue *writeValue) {
             break;
 
         case UA_ATTRIBUTEID_EVENTNOTIFIER:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
@@ -377,52 +377,52 @@ static UA_StatusCode writeValue(UA_Server *server, UA_WriteValue *writeValue) {
                 break;
             }
 
-            if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT)
-                retval |= UA_Variant_copy(&writeValue->value.value, &((UA_VariableNode *)newNode)->value); // todo: zero-copy
+            if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT)
+                retval |= UA_Variant_copy(&aWriteValue->value.value, &((UA_VariableNode *)newNode)->value); // todo: zero-copy
             break;
 
         case UA_ATTRIBUTEID_DATATYPE:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_VALUERANK:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_ARRAYDIMENSIONS:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_ACCESSLEVEL:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_USERACCESSLEVEL:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_MINIMUMSAMPLINGINTERVAL:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_HISTORIZING:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_EXECUTABLE:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 
         case UA_ATTRIBUTEID_USEREXECUTABLE:
-            /* if(writeValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
+            /* if(aWriteValue->value.encodingMask == UA_DATAVALUE_ENCODINGMASK_VARIANT){} */
             retval = UA_STATUSCODE_BADWRITENOTSUPPORTED;
             break;
 

+ 10 - 9
src/ua_types.c

@@ -14,7 +14,7 @@
 
 #include "ua_util.h"
 
-#ifdef _MSC_VER
+#ifdef _WIN32
 #define RAND(SEED) (UA_UInt32)rand()
 #else
 #define RAND(SEED) (UA_UInt32)rand_r(SEED)
@@ -232,8 +232,9 @@ UA_TYPE_AS(UA_DateTime, UA_Int64)
 #define HUNDRED_NANOSEC_PER_USEC 10LL
 #define HUNDRED_NANOSEC_PER_SEC (HUNDRED_NANOSEC_PER_USEC * 1000000LL)
 
-#ifdef _MSC_VER
+#ifdef _WIN32
 static const unsigned __int64 epoch = 116444736000000000;
+int gettimeofday(struct timeval *tp, struct timezone *tzp);
 int gettimeofday(struct timeval *tp, struct timezone *tzp) {
     FILETIME       ft;
     SYSTEMTIME     st;
@@ -257,15 +258,15 @@ UA_DateTime UA_DateTime_now() {
     return dateTime;
 }
 
-UA_DateTimeStruct UA_DateTime_toStruct(UA_DateTime time) {
+UA_DateTimeStruct UA_DateTime_toStruct(UA_DateTime atime) {
     UA_DateTimeStruct dateTimeStruct;
     //calculating the milli-, micro- and nanoseconds
-    dateTimeStruct.nanoSec  = (UA_Int16)((time % 10) * 100);
-    dateTimeStruct.microSec = (UA_Int16)((time % 10000) / 10);
-    dateTimeStruct.milliSec = (UA_Int16)((time % 10000000) / 10000);
+    dateTimeStruct.nanoSec  = (UA_Int16)((atime % 10) * 100);
+    dateTimeStruct.microSec = (UA_Int16)((atime % 10000) / 10);
+    dateTimeStruct.milliSec = (UA_Int16)((atime % 10000000) / 10000);
 
     //calculating the unix time with #include <time.h>
-    time_t secSinceUnixEpoch = (time/10000000) - UNIX_EPOCH_BIAS_SEC;
+    time_t secSinceUnixEpoch = (atime/10000000) - UNIX_EPOCH_BIAS_SEC;
     struct tm ts = *gmtime(&secSinceUnixEpoch);
     dateTimeStruct.sec    = (UA_Int16)ts.tm_sec;
     dateTimeStruct.min    = (UA_Int16)ts.tm_min;
@@ -276,13 +277,13 @@ UA_DateTimeStruct UA_DateTime_toStruct(UA_DateTime time) {
     return dateTimeStruct;
 }
 
-UA_StatusCode UA_DateTime_toString(UA_DateTime time, UA_String *timeString) {
+UA_StatusCode UA_DateTime_toString(UA_DateTime atime, UA_String *timeString) {
     // length of the string is 31 (incl. \0 at the end)
     if(!(timeString->data = UA_malloc(32)))
         return UA_STATUSCODE_BADOUTOFMEMORY;
     timeString->length = 31;
 
-    UA_DateTimeStruct tSt = UA_DateTime_toStruct(time);
+    UA_DateTimeStruct tSt = UA_DateTime_toStruct(atime);
     sprintf((char*)timeString->data, "%2d/%2d/%4d %2d:%2d:%2d.%3d.%3d.%3d", tSt.month, tSt.day, tSt.year,
             tSt.hour, tSt.min, tSt.sec, tSt.milliSec, tSt.microSec, tSt.nanoSec);
     return UA_STATUSCODE_GOOD;
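A small sketch (not part of the commit) exercising the date/time functions touched above; note that UA_DateTime_toString allocates a 32-byte buffer on the heap, which the caller is assumed to release with UA_free.

```c
/* Hedged sketch: print the current time via the renamed-parameter functions. */
#include <stdio.h>
#include "ua_types.h"
#include "ua_util.h"

int main(void) {
    UA_DateTime now = UA_DateTime_now();
    UA_DateTimeStruct s = UA_DateTime_toStruct(now);
    printf("year %d, month %d, day %d\n", s.year, s.month, s.day);

    UA_String str;
    if(UA_DateTime_toString(now, &str) == UA_STATUSCODE_GOOD) {
        printf("%.*s\n", (int)str.length, (char*)str.data);
        UA_free(str.data);   /* buffer was UA_malloc'ed inside toString */
    }
    return 0;
}
```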

+ 0 - 28
src/ua_util.c

@@ -1,28 +0,0 @@
-#include "ua_util.h"
-
-#define __USE_POSIX
-#include <stdlib.h> // malloc, free
-#include <string.h> // memcpy
-
-/* the extern inline in a *.c-file is required for other compilation units to
-   see the inline function. */
-
-void UA_free(void *ptr) {
-    free(ptr); // checks if ptr != UA_NULL in the background
-}
-
-void * UA_malloc(UA_UInt32 size) {
-    return malloc(size);
-}
-
-void * UA_realloc(void *ptr, UA_UInt32 size) {
-    return realloc(ptr, size);
-}
-
-void UA_memcpy(void *dst, void const *src, UA_UInt32 size) {
-    memcpy(dst, src, size);
-}
-
-void * UA_memset(void *ptr, UA_Int32 value, UA_UInt32 size) {
-    return memset(ptr, value, size);
-}

+ 18 - 15
src/ua_util.h

@@ -1,15 +1,18 @@
 #ifndef UA_UTIL_H_
 #define UA_UTIL_H_
 
+#define __USE_POSIX
+#include <stdlib.h> // malloc, free
+#include <string.h> // memcpy
 #include <assert.h> // assert
+#include <stddef.h> /* Needed for queue.h */
 
-#include <stddef.h> /* Needed for sys/queue.h */
-#ifndef WIN32
-#include <sys/queue.h>
-#include <alloca.h>
+#ifdef _WIN32
+#  include <malloc.h>
+#  include "queue.h"
 #else
-#include "queue.h"
-#include <malloc.h>
+#  include <alloca.h>
+#  include <sys/queue.h>
 #endif
 
 #include "ua_types.h"
@@ -22,17 +25,17 @@
 
 #define UA_assert(ignore) assert(ignore)
 
-// these are inlined for release builds
-void UA_free(void *ptr);
-void * UA_malloc(UA_UInt32 size);
-void * UA_realloc(void *ptr, UA_UInt32 size);
-void UA_memcpy(void *dst, void const *src, UA_UInt32 size);
-void * UA_memset(void *ptr, UA_Int32 value, UA_UInt32 size);
+/* Replace the macros with functions for custom allocators.. */
+#define UA_free(ptr) free(ptr)
+#define UA_malloc(size) malloc(size)
+#define UA_realloc(ptr, size) realloc(ptr, size)
+#define UA_memcpy(dst, src, size) memcpy(dst, src, size)
+#define UA_memset(ptr, value, size) memset(ptr, value, size)
 
-#ifdef WIN32
-#define UA_alloca(SIZE) _alloca(SIZE)
+#ifdef _WIN32
+# define UA_alloca(SIZE) _alloca(SIZE)
 #else
-#define UA_alloca(SIZE) alloca(SIZE)
+# define UA_alloca(SIZE) alloca(SIZE)
 #endif
 
 #endif /* UA_UTIL_H_ */
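The new comment says the macros can be replaced with functions for custom allocators. As a hedged illustration (not part of the commit; the wrapper and counter names are hypothetical), the definitions in ua_util.h could be swapped for counting wrappers like this:

```c
/* Hedged sketch: hypothetical replacement definitions for ua_util.h that
 * route allocations through counting wrappers. */
#include <stdlib.h>

static size_t ua_live_allocations = 0;   /* illustrative bookkeeping only */

static inline void * ua_counting_malloc(size_t size) {
    void *ptr = malloc(size);
    if(ptr)
        ua_live_allocations++;
    return ptr;
}

static inline void ua_counting_free(void *ptr) {
    if(ptr)
        ua_live_allocations--;
    free(ptr);
}

#define UA_malloc(size) ua_counting_malloc(size)
#define UA_free(ptr) ua_counting_free(ptr)
```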