nobase_dist_doc_DATA += examples/kea6/softwire46.json
nobase_dist_doc_DATA += examples/kea6/stateless.json
nobase_dist_doc_DATA += examples/kea6/with-ddns.json
+nobase_dist_doc_DATA += examples/netconf/comments.json
+nobase_dist_doc_DATA += examples/netconf/simple.json
devel:
mkdir -p html
--- /dev/null
+// This is an example of a configuration for Netconf.
+// It uses embedded comments, i.e., comments which are included in the
+// configuration objects and not stripped at lexical analysis.
+
+{
+ "Netconf":
+ {
+ // Global scope
+ "comment": "The Netconf Agent",
+
+ // In servers
+ "managed-servers":
+ {
+ "dhcp4":
+ {
+ "comment": "the model is mandatory",
+ "model": "kea-dhcp4-server",
+ // In control socket.
+ "control-socket":
+ {
+ "comment": "using unix/local socket",
+ "type": "unix",
+ "name": "/path/to/the/unix/socket-v4"
+ }
+ }
+ }
+
+ },
+
+ "Logging":
+ {
+ // In loggers
+ "loggers": [
+ {
+ "comment": "A logger",
+ "name": "kea-ctrl-agent"
+ }
+ ]
+ }
+}
--- /dev/null
+// This is a simple example of a configuration for Netconf.
+// This server provides a YANG interface for all Kea servers and the agent.
+{
+ "Netconf":
+ {
+ // This map specifies how each server is managed:
+ // the YANG model to use and the control channel.
+ // Currently three control channel types are supported:
+ // "stdout" which outputs the configuration on the standard output,
+ // "unix" which uses the local control channel supported by the
+ // "dhcp4" and "dhcp6" servers ("d2" support is not yet merged),
+ // and "http" which uses the Control Agent "ca" to manage itself or
+ // to forward commands to "dhcp4" or "dhcp6" (the same comment applies to "d2").
+ "managed-servers":
+ {
+ // This is how Netconf can communicate with the DHCPv4 server.
+ "dhcp4":
+ {
+ "comment": "DHCP4 server",
+ "model": "kea-dhcp4-server",
+ "control-socket":
+ {
+ "type": "unix",
+ "name": "/path/to/the/unix/socket-v4"
+ }
+ },
+
+ // DHCPv6 parameters.
+ "dhcp6":
+ {
+ "model": "kea-dhcp6-server",
+ "control-socket":
+ {
+ "type": "unix",
+ "name": "/path/to/the/unix/socket-v6"
+ }
+ },
+
+ // The DHCP-DDNS (nicknamed D2) server does not support the
+ // command channel yet.
+ "d2":
+ {
+ "model": "kea-dhcp-ddns",
+ "control-socket":
+ {
+ "type": "stdout",
+ "user-context": { "in-use": false }
+ }
+ },
+
+ // Of course the Control Agent (nicknamed CA) supports HTTP.
+ "ca":
+ {
+ "model": "kea-ctrl-agent",
+ "control-socket":
+ {
+ "type": "http",
+ "host": "127.0.0.1",
+ "port": 8000
+ }
+ }
+ },
+
+ // Netconf is able to load hook libraries that augment its operation.
+ // The primary functionality is the ability to add new commands.
+ "hooks-libraries": [
+ // The hook libraries list may contain more than one library.
+ {
+ // The only necessary parameter is the library filename.
+ "library": "/opt/local/netconf-commands.so",
+
+ // Some libraries may support parameters. Make sure you
+ // type this section carefully, as kea-netconf does not validate
+ // it (because the format is library specific).
+ "parameters": {
+ "param1": "foo"
+ }
+ }
+ ]
+
+ },
+
+ // Similar to other Kea components, Netconf also uses logging.
+ "Logging":
+ {
+ "loggers": [
+ {
+ "name": "kea-netconf",
+ "output_options": [
+ {
+ "output": "/var/log/kea-netconf.log",
+ // Several additional parameters are possible in addition
+ // to the typical output. Flush determines whether the logger
+ // flushes its output to the file. Maxsize determines the maximum
+ // file size before the file is rotated. Maxver specifies the
+ // maximum number of rotated files to keep.
+ "flush": true,
+ "maxsize": 204800,
+ "maxver": 4
+ }
+ ],
+ "severity": "INFO",
+ "debuglevel": 0
+ }
+ ]
+ }
+}
-
SUBDIRS = . tests
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
man_MANS = kea-netconf.8
DISTCLEANFILES = $(man_MANS)
EXTRA_DIST = $(man_MANS) kea-netconf.xml
-#EXTRA_DIST += netconf.dox netconf_hooks.dox netconfo6.dox
+#EXTRA_DIST += netconf.dox netconf_hooks.dox
#EXTRA_DIST += netconf_parser.yy
if GENERATE_DOCS
noinst_LTLIBRARIES = libnetconf.la
libnetconf_la_SOURCES = netconf_log.cc netconf_log.h
+libnetconf_la_SOURCES += parser_context.cc parser_context.h
+libnetconf_la_SOURCES += parser_context_decl.h netconf_lexer.ll
+libnetconf_la_SOURCES += netconf_parser.cc netconf_parser.h
+libnetconf_la_SOURCES += location.hh position.hh stack.hh
nodist_libnetconf_la_SOURCES = netconf_messages.h netconf_messages.cc
EXTRA_DIST += netconf_messages.mes
+EXTRA_DIST += netconf_lexer.ll
+EXTRA_DIST += netconf_parser.yy
sbin_PROGRAMS = kea-netconf
kea_netconf_SOURCES = main.cc
kea_netconf_LDADD = libnetconf.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/http/libkea-http.la
kea_netconf_LDADD += $(top_builddir)/src/lib/process/libkea-process.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/cfgrpt/libcfgrpt.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/yang/libkea-yang.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/stats/libkea-stats.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/config/libkea-cfgclient.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/dhcp/libkea-dhcp++.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/hooks/libkea-hooks.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/cc/libkea-cc.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/asiolink/libkea-asiolink.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/dns/libkea-dns++.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/cryptolink/libkea-cryptolink.la
kea_netconf_LDADD += $(top_builddir)/src/lib/log/libkea-log.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/util/threads/libkea-threads.la
+kea_netconf_LDADD += $(top_builddir)/src/lib/util/libkea-util.la
kea_netconf_LDADD += $(top_builddir)/src/lib/exceptions/libkea-exceptions.la
kea_netconf_LDADD += $(LOG4CPLUS_LIBS) $(CRYPTO_LIBS) $(BOOST_LIBS) $(SYSREPO_LIBS)
if GENERATE_PARSER
-#parser: netconf_lexer.cc location.hh position.hh stack.hh netconf_parser.cc netconf_parser.h
+parser: netconf_lexer.cc location.hh position.hh stack.hh netconf_parser.cc netconf_parser.h
# @echo "Flex/bison files regenerated"
# --- Flex/Bison stuff below --------------------------------------------------
# Call flex with -s to check that the default rule can be suppressed
# Call bison with -W to get warnings like unmarked empty rules
# Note C++11 deprecated register still used by flex < 2.6.0
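+# When the GENERATE_PARSER conditional is enabled, the flex/bison files can be
+# regenerated by hand with "make parser" in this directory (this is only
+# needed when netconf_lexer.ll or netconf_parser.yy changes).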
-#location.hh position.hh stack.hh netconf_parser.cc netconf_parser.h: netconf_parser.yy
-# $(YACC) --defines=netconf_parser.h --report=all --report-file=netconf_parser.report -o netconf_parser.cc netconf_parser.yy
+location.hh position.hh stack.hh netconf_parser.cc netconf_parser.h: netconf_parser.yy
+ $(YACC) --defines=netconf_parser.h --report=all --report-file=netconf_parser.report -o netconf_parser.cc netconf_parser.yy
-#netconf_lexer.cc: netconf_lexer.ll
-# $(LEX) --prefix netconf_ -o netconf_lexer.cc netconf_lexer.ll
+netconf_lexer.cc: netconf_lexer.ll
+ $(LEX) --prefix netconf_ -o netconf_lexer.cc netconf_lexer.ll
else
--- /dev/null
+/* Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+%{ /* -*- C++ -*- */
+
+/* Generated files do not make clang static analyser so happy */
+#ifndef __clang_analyzer__
+
+#include <cerrno>
+#include <climits>
+#include <cstdlib>
+#include <string>
+#include <netconf/parser_context.h>
+#include <asiolink/io_address.h>
+#include <boost/lexical_cast.hpp>
+#include <exceptions/exceptions.h>
+#include <cc/dhcp_config_error.h>
+
+/* Please avoid C++ style comments (// ... eol) as they break flex 2.6.2 */
+
+/* Work around an incompatibility in flex (at least versions
+ 2.5.31 through 2.5.33): it generates code that does
+ not conform to C89. See Debian bug 333231
+ <http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=333231>. */
+# undef yywrap
+# define yywrap() 1
+
+namespace {
+
+bool start_token_flag = false;
+
+isc::netconf::ParserContext::ParserType start_token_value;
+unsigned int comment_start_line = 0;
+
+using namespace isc;
+using isc::netconf::NetconfParser;
+
+};
+
+/* To avoid the call to exit... oops! */
+#define YY_FATAL_ERROR(msg) isc::netconf::ParserContext::fatal(msg)
+%}
+
+/* noyywrap disables automatic rewinding for the next file to parse. Since we
+ always parse only a single string, there's no need to do any wraps. And
+ using yywrap requires linking with -lfl, which provides the default yywrap
+ implementation that always returns 1 anyway. */
+%option noyywrap
+
+/* nounput simplifies the lexer, by removing support for putting a character
+ back into the input stream. We never use such capability anyway. */
+%option nounput
+
+/* batch means that we'll never use the generated lexer interactively. */
+%option batch
+
+/* Avoid static global variables in order to remain compatible with C++. */
+/* As a last resort, %option reentrant could be used. */
+
+/* Enables debug mode. To see the debug messages, one needs to also set
+ yy_flex_debug to 1, then the debug messages will be printed on stderr. */
+%option debug
+
+/* I have no idea what this option does, except it was specified in the bison
+ examples and Postgres folks added it to remove gcc 4.3 warnings. Let's
+ be on the safe side and keep it. */
+%option noinput
+
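+/* Exclusive start conditions: COMMENT is used while consuming C-style
+ comments, and the DIR_* conditions implement the <?include "file"?>
+ directive. */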
+%x COMMENT
+%x DIR_ENTER DIR_INCLUDE DIR_EXIT
+
+/* These are not token expressions yet, just convenience expressions that
+ can be used during actual token definitions. Note some can match
+ incorrect inputs (e.g., IP addresses) which must be checked. */
+int \-?[0-9]+
+blank [ \t\r]
+
+UnicodeEscapeSequence u[0-9A-Fa-f]{4}
+JSONEscapeCharacter ["\\/bfnrt]
+JSONEscapeSequence {JSONEscapeCharacter}|{UnicodeEscapeSequence}
+JSONStandardCharacter [^\x00-\x1f"\\]
+JSONStringCharacter {JSONStandardCharacter}|\\{JSONEscapeSequence}
+JSONString \"{JSONStringCharacter}*\"
+
+/* for errors */
+
+BadUnicodeEscapeSequence u[0-9A-Fa-f]{0,3}[^0-9A-Fa-f]
+BadJSONEscapeSequence [^"\\/bfnrtu]|{BadUnicodeEscapeSequence}
+ControlCharacter [\x00-\x1f]
+ControlCharacterFill [^"\\]|\\{JSONEscapeSequence}
+
+%{
+/* This code run each time a pattern is matched. It updates the location
+ by moving it ahead by yyleng bytes. yyleng specifies the length of the
+ currently matched token. */
+#define YY_USER_ACTION driver.loc_.columns(yyleng);
+%}
+
+%%
+
+%{
+ /* This part of the code is copied over verbatim to the top
+ of the generated yylex function. Explanation:
+ http://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html */
+
+ /* Code run each time yylex is called. */
+ driver.loc_.step();
+
+ /* We currently have 3 points of entry defined:
+ START_JSON - which expects any valid JSON,
+ START_NETCONF - which expects a full configuration (with the outer map
+ and the Netconf object in it),
+ START_SUB_NETCONF - which expects only the content of the Netconf object;
+ this is primarily useful for testing. */
+ if (start_token_flag) {
+ start_token_flag = false;
+ switch (start_token_value) {
+ case ParserContext::PARSER_JSON:
+ default:
+ return NetconfParser::make_START_JSON(driver.loc_);
+ case ParserContext::PARSER_NETCONF:
+ return NetconfParser::make_START_NETCONF(driver.loc_);
+ case ParserContext::PARSER_SUB_NETCONF:
+ return NetconfParser::make_START_SUB_NETCONF(driver.loc_);
+ }
+ }
+%}
+
+#.* ;
+
+"//"(.*) ;
+
+"/*" {
+ BEGIN(COMMENT);
+ comment_start_line = driver.loc_.end.line;
+}
+
+<COMMENT>"*/" BEGIN(INITIAL);
+<COMMENT>. ;
+<COMMENT><<EOF>> {
+ isc_throw(ParseError, "Comment not closed. (/* in line " << comment_start_line);
+}
+
+"<?" BEGIN(DIR_ENTER);
+<DIR_ENTER>"include" BEGIN(DIR_INCLUDE);
+<DIR_INCLUDE>\"([^\"\n])+\" {
+ /* Include directive. */
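+ /* The full directive looks like <?include "path/to/file"?>: the lexer
+ entered DIR_ENTER on "<?", then DIR_INCLUDE on "include", and this rule
+ matches the quoted file name. */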
+
+ /* Extract the filename. */
+ std::string tmp(yytext+1);
+ tmp.resize(tmp.size() - 1);
+
+ driver.includeFile(tmp);
+}
+<DIR_ENTER,DIR_INCLUDE,DIR_EXIT><<EOF>> {
+ isc_throw(ParseError, "Directive not closed.");
+}
+<DIR_EXIT>"?>" BEGIN(INITIAL);
+
+
+<*>{blank}+ {
+ /* Ok, we found some whitespace. Let's ignore it and update the loc variable. */
+ driver.loc_.step();
+}
+
+<*>[\n]+ {
+ /* Newline found. Let's update the location and continue. */
+ driver.loc_.lines(yyleng);
+ driver.loc_.step();
+}
+
+
+\"Netconf\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONFIG:
+ return NetconfParser::make_NETCONF(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("Netconf", driver.loc_);
+ }
+}
+
+\"user-context\" {
+ switch(driver.ctx_) {
+ case ParserContext::NETCONF:
+ case ParserContext::SERVER:
+ case ParserContext::CONTROL_SOCKET:
+ case ParserContext::LOGGERS:
+ return NetconfParser::make_USER_CONTEXT(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("user-context", driver.loc_);
+ }
+}
+
+\"comment\" {
+ switch(driver.ctx_) {
+ case ParserContext::NETCONF:
+ case ParserContext::SERVER:
+ case ParserContext::CONTROL_SOCKET:
+ case ParserContext::LOGGERS:
+ return NetconfParser::make_COMMENT(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("comment", driver.loc_);
+ }
+}
+
+\"managed-servers\" {
+ switch(driver.ctx_) {
+ case ParserContext::NETCONF:
+ return NetconfParser::make_MANAGED_SERVERS(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("managed-servers", driver.loc_);
+ }
+}
+
+\"dhcp4\" {
+ switch(driver.ctx_) {
+ case ParserContext::MANAGED_SERVERS:
+ return NetconfParser::make_DHCP4_SERVER(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("dhcp4", driver.loc_);
+ }
+}
+
+\"dhcp6\" {
+ switch(driver.ctx_) {
+ case ParserContext::MANAGED_SERVERS:
+ return NetconfParser::make_DHCP6_SERVER(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("dhcp6", driver.loc_);
+ }
+}
+
+\"d2\" {
+ switch(driver.ctx_) {
+ case ParserContext::MANAGED_SERVERS:
+ return NetconfParser::make_D2_SERVER(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("d2", driver.loc_);
+ }
+}
+
+\"ca\" {
+ switch(driver.ctx_) {
+ case ParserContext::MANAGED_SERVERS:
+ return NetconfParser::make_CA_SERVER(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("ca", driver.loc_);
+ }
+}
+
+\"model\" {
+ switch(driver.ctx_) {
+ case ParserContext::SERVER:
+ return NetconfParser::make_MODEL(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("model", driver.loc_);
+ }
+}
+
+\"control-socket\" {
+ switch(driver.ctx_) {
+ case ParserContext::SERVER:
+ return NetconfParser::make_CONTROL_SOCKET(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("control-socket", driver.loc_);
+ }
+}
+
+\"unix\" {
+ switch(driver.ctx_) {
+ case ParserContext::SOCKET_TYPE:
+ return NetconfParser::make_UNIX(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("unix", driver.loc_);
+ }
+}
+
+\"http\" {
+ switch(driver.ctx_) {
+ case ParserContext::SOCKET_TYPE:
+ return NetconfParser::make_HTTP(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("http", driver.loc_);
+ }
+}
+
+\"stdout\" {
+ switch(driver.ctx_) {
+ case ParserContext::SOCKET_TYPE:
+ return NetconfParser::make_STDOUT(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("stdout", driver.loc_);
+ }
+}
+
+\"name\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONTROL_SOCKET:
+ case ParserContext::LOGGERS:
+ return NetconfParser::make_NAME(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("name", driver.loc_);
+ }
+}
+
+\"type\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONTROL_SOCKET:
+ return NetconfParser::make_TYPE(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("type", driver.loc_);
+ }
+}
+
+\"host\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONTROL_SOCKET:
+ return NetconfParser::make_HOST(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("host", driver.loc_);
+ }
+}
+
+\"port\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONTROL_SOCKET:
+ return NetconfParser::make_PORT(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("port", driver.loc_);
+ }
+}
+
+\"hooks-libraries\" {
+ switch(driver.ctx_) {
+ case ParserContext::NETCONF:
+ return NetconfParser::make_HOOKS_LIBRARIES(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("hooks-libraries", driver.loc_);
+ }
+}
+
+\"library\" {
+ switch(driver.ctx_) {
+ case ParserContext::HOOKS_LIBRARIES:
+ return NetconfParser::make_LIBRARY(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("library", driver.loc_);
+ }
+}
+
+\"parameters\" {
+ switch(driver.ctx_) {
+ case ParserContext::HOOKS_LIBRARIES:
+ return NetconfParser::make_PARAMETERS(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("parameters", driver.loc_);
+ }
+}
+
+\"Logging\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONFIG:
+ return NetconfParser::make_LOGGING(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("Logging", driver.loc_);
+ }
+}
+
+\"loggers\" {
+ switch(driver.ctx_) {
+ case ParserContext::LOGGING:
+ return NetconfParser::make_LOGGERS(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("loggers", driver.loc_);
+ }
+}
+
+\"output_options\" {
+ switch(driver.ctx_) {
+ case ParserContext::LOGGERS:
+ return NetconfParser::make_OUTPUT_OPTIONS(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("output_options", driver.loc_);
+ }
+}
+
+\"output\" {
+ switch(driver.ctx_) {
+ case ParserContext::OUTPUT_OPTIONS:
+ return NetconfParser::make_OUTPUT(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("output", driver.loc_);
+ }
+}
+
+\"flush\" {
+ switch(driver.ctx_) {
+ case ParserContext::OUTPUT_OPTIONS:
+ return NetconfParser::make_FLUSH(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("flush", driver.loc_);
+ }
+}
+
+\"maxsize\" {
+ switch(driver.ctx_) {
+ case ParserContext::OUTPUT_OPTIONS:
+ return NetconfParser::make_MAXSIZE(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("maxsize", driver.loc_);
+ }
+}
+
+\"maxver\" {
+ switch(driver.ctx_) {
+ case ParserContext::OUTPUT_OPTIONS:
+ return NetconfParser::make_MAXVER(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("maxver", driver.loc_);
+ }
+}
+
+\"debuglevel\" {
+ switch(driver.ctx_) {
+ case ParserContext::LOGGERS:
+ return NetconfParser::make_DEBUGLEVEL(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("debuglevel", driver.loc_);
+ }
+}
+
+\"severity\" {
+ switch(driver.ctx_) {
+ case ParserContext::LOGGERS:
+ return NetconfParser::make_SEVERITY(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("severity", driver.loc_);
+ }
+}
+
+\"Dhcp4\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONFIG:
+ return NetconfParser::make_DHCP4(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("Dhcp4", driver.loc_);
+ }
+}
+
+\"Dhcp6\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONFIG:
+ return NetconfParser::make_DHCP6(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("Dhcp6", driver.loc_);
+ }
+}
+
+\"DhcpDdns\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONFIG:
+ return NetconfParser::make_DHCPDDNS(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("DhcpDdns", driver.loc_);
+ }
+}
+
+\"Control-agent\" {
+ switch(driver.ctx_) {
+ case ParserContext::CONFIG:
+ return NetconfParser::make_CONTROL_AGENT(driver.loc_);
+ default:
+ return NetconfParser::make_STRING("Control-agent", driver.loc_);
+ }
+}
+
+{JSONString} {
+ /* A string has been matched. It contains the actual string and the
+ surrounding double quotes. We need to get those quotes out of the way and
+ just use its content, e.g. for "foo" we should get foo */
+ std::string raw(yytext+1);
+ size_t len = raw.size() - 1;
+ raw.resize(len);
+ std::string decoded;
+ decoded.reserve(len);
+ for (size_t pos = 0; pos < len; ++pos) {
+ int b = 0;
+ char c = raw[pos];
+ switch (c) {
+ case '"':
+ /* impossible condition */
+ driver.error(driver.loc_, "Bad quote in \"" + raw + "\"");
+ break;
+ case '\\':
+ ++pos;
+ if (pos >= len) {
+ /* impossible condition */
+ driver.error(driver.loc_, "Overflow escape in \"" + raw + "\"");
+ }
+ c = raw[pos];
+ switch (c) {
+ case '"':
+ case '\\':
+ case '/':
+ decoded.push_back(c);
+ break;
+ case 'b':
+ decoded.push_back('\b');
+ break;
+ case 'f':
+ decoded.push_back('\f');
+ break;
+ case 'n':
+ decoded.push_back('\n');
+ break;
+ case 'r':
+ decoded.push_back('\r');
+ break;
+ case 't':
+ decoded.push_back('\t');
+ break;
+ case 'u':
+ /* support only \u0000 to \u00ff */
+ ++pos;
+ if (pos + 4 > len) {
+ /* impossible condition */
+ driver.error(driver.loc_,
+ "Overflow unicode escape in \"" + raw + "\"");
+ }
+ if ((raw[pos] != '0') || (raw[pos + 1] != '0')) {
+ driver.error(driver.loc_, "Unsupported unicode escape in \"" + raw + "\"");
+ }
+ pos += 2;
+ c = raw[pos];
+ if ((c >= '0') && (c <= '9')) {
+ b = (c - '0') << 4;
+ } else if ((c >= 'A') && (c <= 'F')) {
+ b = (c - 'A' + 10) << 4;
+ } else if ((c >= 'a') && (c <= 'f')) {
+ b = (c - 'a' + 10) << 4;
+ } else {
+ /* impossible condition */
+ driver.error(driver.loc_, "Not hexadecimal in unicode escape in \"" + raw + "\"");
+ }
+ pos++;
+ c = raw[pos];
+ if ((c >= '0') && (c <= '9')) {
+ b |= c - '0';
+ } else if ((c >= 'A') && (c <= 'F')) {
+ b |= c - 'A' + 10;
+ } else if ((c >= 'a') && (c <= 'f')) {
+ b |= c - 'a' + 10;
+ } else {
+ /* impossible condition */
+ driver.error(driver.loc_, "Not hexadecimal in unicode escape in \"" + raw + "\"");
+ }
+ decoded.push_back(static_cast<char>(b & 0xff));
+ break;
+ default:
+ /* impossible condition */
+ driver.error(driver.loc_, "Bad escape in \"" + raw + "\"");
+ }
+ break;
+ default:
+ if ((c >= 0) && (c < 0x20)) {
+ /* impossible condition */
+ driver.error(driver.loc_, "Invalid control in \"" + raw + "\"");
+ }
+ decoded.push_back(c);
+ }
+ }
+
+ return NetconfParser::make_STRING(decoded, driver.loc_);
+}
+
+\"{JSONStringCharacter}*{ControlCharacter}{ControlCharacterFill}*\" {
+ /* Bad string with a forbidden control character inside */
+ driver.error(driver.loc_, "Invalid control in " + std::string(yytext));
+}
+
+\"{JSONStringCharacter}*\\{BadJSONEscapeSequence}[^\x00-\x1f"]*\" {
+ /* Bad string with a bad escape inside */
+ driver.error(driver.loc_, "Bad escape in " + std::string(yytext));
+}
+
+\"{JSONStringCharacter}*\\\" {
+ /* Bad string with an open escape at the end */
+ driver.error(driver.loc_, "Overflow escape in " + std::string(yytext));
+}
+
+"[" { return NetconfParser::make_LSQUARE_BRACKET(driver.loc_); }
+"]" { return NetconfParser::make_RSQUARE_BRACKET(driver.loc_); }
+"{" { return NetconfParser::make_LCURLY_BRACKET(driver.loc_); }
+"}" { return NetconfParser::make_RCURLY_BRACKET(driver.loc_); }
+"," { return NetconfParser::make_COMMA(driver.loc_); }
+":" { return NetconfParser::make_COLON(driver.loc_); }
+
+{int} {
+ /* An integer was found. */
+ std::string tmp(yytext);
+ int64_t integer = 0;
+ try {
+ /* In substring we want to use negative values (e.g. -1).
+ In enterprise-id we need to use values up to 0xffffffff.
+ To cover both of those use cases, we need at least
+ int64_t. */
+ integer = boost::lexical_cast<int64_t>(tmp);
+ } catch (const boost::bad_lexical_cast &) {
+ driver.error(driver.loc_, "Failed to convert " + tmp + " to an integer.");
+ }
+
+ /* The parser needs the string form as double conversion is not lossless */
+ return NetconfParser::make_INTEGER(integer, driver.loc_);
+}
+
+[-+]?[0-9]*\.?[0-9]*([eE][-+]?[0-9]+)? {
+ /* A floating point was found. */
+ std::string tmp(yytext);
+ double fp = 0.0;
+ try {
+ fp = boost::lexical_cast<double>(tmp);
+ } catch (const boost::bad_lexical_cast &) {
+ driver.error(driver.loc_, "Failed to convert " + tmp + " to a floating point.");
+ }
+
+ return NetconfParser::make_FLOAT(fp, driver.loc_);
+}
+
+true|false {
+ std::string tmp(yytext);
+ return NetconfParser::make_BOOLEAN(tmp == "true", driver.loc_);
+}
+
+null {
+ return NetconfParser::make_NULL_TYPE(driver.loc_);
+}
+
+(?i:true) driver.error (driver.loc_, "JSON true reserved keyword is lower case only");
+
+(?i:false) driver.error (driver.loc_, "JSON false reserved keyword is lower case only");
+
+(?i:null) driver.error (driver.loc_, "JSON null reserved keyword is lower case only");
+
+<*>. driver.error (driver.loc_, "Invalid character: " + std::string(yytext));
+
+<<EOF>> {
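+ /* We reached the end of the current input. If this was an include file,
+ restore the including file's buffer, location, name and FILE pointer;
+ otherwise return the END token to the parser. */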
+ if (driver.states_.empty()) {
+ return NetconfParser::make_END(driver.loc_);
+ }
+ driver.loc_ = driver.locs_.back();
+ driver.locs_.pop_back();
+ driver.file_ = driver.files_.back();
+ driver.files_.pop_back();
+ if (driver.sfile_) {
+ fclose(driver.sfile_);
+ driver.sfile_ = 0;
+ }
+ if (!driver.sfiles_.empty()) {
+ driver.sfile_ = driver.sfiles_.back();
+ driver.sfiles_.pop_back();
+ }
+ netconf__delete_buffer(YY_CURRENT_BUFFER);
+ netconf__switch_to_buffer(driver.states_.back());
+ driver.states_.pop_back();
+
+ BEGIN(DIR_EXIT);
+}
+
+%%
+
+using namespace isc::dhcp;
+
+void
+ParserContext::scanStringBegin(const std::string& str, ParserType parser_type)
+{
+ start_token_flag = true;
+ start_token_value = parser_type;
+
+ file_ = "<string>";
+ sfile_ = 0;
+ loc_.initialize(&file_);
+ yy_flex_debug = trace_scanning_;
+ YY_BUFFER_STATE buffer;
+ buffer = netconf__scan_bytes(str.c_str(), str.size());
+ if (!buffer) {
+ fatal("cannot scan string");
+ /* fatal() throws an exception so this can't be reached */
+ }
+}
+
+void
+ParserContext::scanFileBegin(FILE * f,
+ const std::string& filename,
+ ParserType parser_type)
+{
+ start_token_flag = true;
+ start_token_value = parser_type;
+
+ file_ = filename;
+ sfile_ = f;
+ loc_.initialize(&file_);
+ yy_flex_debug = trace_scanning_;
+ YY_BUFFER_STATE buffer;
+
+ /* See netconf_lexer.cc header for available definitions */
+ buffer = netconf__create_buffer(f, 65536 /*buffer size*/);
+ if (!buffer) {
+ fatal("cannot scan file " + filename);
+ }
+ netconf__switch_to_buffer(buffer);
+}
+
+void
+ParserContext::scanEnd() {
+ if (sfile_)
+ fclose(sfile_);
+ sfile_ = 0;
+ static_cast<void>(netconf_lex_destroy());
+ /* Close files */
+ while (!sfiles_.empty()) {
+ FILE* f = sfiles_.back();
+ if (f) {
+ fclose(f);
+ }
+ sfiles_.pop_back();
+ }
+ /* Delete states */
+ while (!states_.empty()) {
+ netconf__delete_buffer(states_.back());
+ states_.pop_back();
+ }
+}
+
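+/* Divert scanning to an include file: the current buffer, location, file
+ name and FILE pointer are pushed on stacks and restored when the included
+ file reaches <<EOF>>. */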
+void
+ParserContext::includeFile(const std::string& filename) {
+ if (states_.size() > 10) {
+ fatal("Too many nested include.");
+ }
+
+ FILE* f = fopen(filename.c_str(), "r");
+ if (!f) {
+ fatal("Can't open include file " + filename);
+ }
+ if (sfile_) {
+ sfiles_.push_back(sfile_);
+ }
+ sfile_ = f;
+ states_.push_back(YY_CURRENT_BUFFER);
+ YY_BUFFER_STATE buffer;
+ buffer = netconf__create_buffer(f, 65536 /*buffer size*/);
+ if (!buffer) {
+ fatal( "Can't scan include file " + filename);
+ }
+ netconf__switch_to_buffer(buffer);
+ files_.push_back(file_);
+ file_ = filename;
+ locs_.push_back(loc_);
+ loc_.initialize(&file_);
+
+ BEGIN(INITIAL);
+}
+
+namespace {
+/** To avoid unused function error */
+class Dummy {
+ /* cppcheck-suppress unusedPrivateFunction */
+ void dummy() { yy_fatal_error("Fix me: how to disable its definition?"); }
+};
+}
+#endif /* !__clang_analyzer__ */
--- /dev/null
+/* Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+%skeleton "lalr1.cc" /* -*- C++ -*- */
+%require "3.0.0"
+%defines
+%define parser_class_name {NetconfParser}
+%define api.prefix {netconf_}
+%define api.token.constructor
+%define api.value.type variant
+%define api.namespace {isc::netconf}
+%define parse.assert
+%code requires
+{
+#include <string>
+#include <cc/data.h>
+#include <boost/lexical_cast.hpp>
+#include <netconf/parser_context_decl.h>
+
+using namespace isc::netconf;
+using namespace isc::data;
+using namespace std;
+}
+// The parsing context.
+%param { isc::netconf::ParserContext& ctx }
+%locations
+%define parse.trace
+%define parse.error verbose
+%code
+{
+#include <netconf/parser_context.h>
+}
+
+
+%define api.token.prefix {TOKEN_}
+// Tokens in an order which makes sense and is related to the intended use.
+// Actual regexps for tokens are defined in netconf_lexer.ll.
+%token
+ END 0 "end of file"
+ COMMA ","
+ COLON ":"
+ LSQUARE_BRACKET "["
+ RSQUARE_BRACKET "]"
+ LCURLY_BRACKET "{"
+ RCURLY_BRACKET "}"
+ NULL_TYPE "null"
+
+ NETCONF "Netconf"
+
+ USER_CONTEXT "user-context"
+ COMMENT "comment"
+
+ MANAGED_SERVERS "managed-servers"
+ DHCP4_SERVER "dhcp4"
+ DHCP6_SERVER "dhcp6"
+ D2_SERVER "d2"
+ CA_SERVER "ca"
+ MODEL "model"
+ CONTROL_SOCKET "control-socket"
+ TYPE "type"
+ UNIX "unix"
+ HTTP "http"
+ STDOUT "stdout"
+ NAME "name"
+ HOST "host"
+ PORT "port"
+
+ HOOKS_LIBRARIES "hooks-libraries"
+ LIBRARY "library"
+ PARAMETERS "parameters"
+
+ LOGGING "Logging"
+ LOGGERS "loggers"
+ OUTPUT_OPTIONS "output_options"
+ OUTPUT "output"
+ DEBUGLEVEL "debuglevel"
+ SEVERITY "severity"
+ FLUSH "flush"
+ MAXSIZE "maxsize"
+ MAXVER "maxver"
+
+ DHCP4 "Dhcp4"
+ DHCP6 "Dhcp6"
+ DHCPDDNS "DhcpDdns"
+ CONTROL_AGENT "Control-agent"
+
+ // Not real tokens, just a way to signal what the parser is expected to
+ // parse. This defines the starting point. It can either be the full grammar
+ // (START_NETCONF), the part of the grammar related to the Netconf object
+ // (START_SUB_NETCONF), or any valid JSON (START_JSON).
+ START_JSON
+ START_NETCONF
+ START_SUB_NETCONF
+;
+
+%token <std::string> STRING "constant string"
+%token <int64_t> INTEGER "integer"
+%token <double> FLOAT "floating point"
+%token <bool> BOOLEAN "boolean"
+
+%type <ElementPtr> value
+%type <ElementPtr> map_value
+%type <ElementPtr> socket_type_value
+
+%printer { yyoutput << $$; } <*>;
+
+%%
+
+// The whole grammar starts with a map, because the config file
+// consists of Netconf, DhcpX, Logging and DhcpDdns entries in one big { }.
+%start start;
+
+// The starting token can be one of those listed below. Note these are
+// "fake" tokens. They're produced by the lexer before any input text
+// is parsed.
+start: START_JSON { ctx.ctx_ = ctx.NO_KEYWORDS; } json
+ | START_NETCONF { ctx.ctx_ = ctx.CONFIG; } netconf_syntax_map
+ | START_SUB_NETCONF { ctx.ctx_ = ctx.NETCONF; } sub_netconf
+ ;
+
+// This rule defines a "shortcut". Instead of specifying the whole structure
+// expected by full grammar, we can tell the parser to start from content of
+// the Control-netconf. This is very useful for unit-testing, so we don't need
+// to repeat the outer map and "Control-netconf" map. We can simply provide
+// the contents of that map.
+sub_netconf: LCURLY_BRACKET {
+ // Parse the Netconf map
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.push_back(m);
+} global_params RCURLY_BRACKET {
+ // parsing completed
+};
+
+// --- generic JSON parser -----------------------------------------------------
+
+// json expression can be a value. What value means is defined below.
+json: value {
+ // Push back the JSON value on the stack
+ ctx.stack_.push_back($1);
+};
+
+// Rules for value. This can be one of the primary types allowed in JSON.
+value: INTEGER { $$ = ElementPtr(new IntElement($1, ctx.loc2pos(@1))); }
+ | FLOAT { $$ = ElementPtr(new DoubleElement($1, ctx.loc2pos(@1))); }
+ | BOOLEAN { $$ = ElementPtr(new BoolElement($1, ctx.loc2pos(@1))); }
+ | STRING { $$ = ElementPtr(new StringElement($1, ctx.loc2pos(@1))); }
+ | NULL_TYPE { $$ = ElementPtr(new NullElement(ctx.loc2pos(@1))); }
+ | map { $$ = ctx.stack_.back(); ctx.stack_.pop_back(); }
+ | list_generic { $$ = ctx.stack_.back(); ctx.stack_.pop_back(); }
+ ;
+
+// Rule for map. It will start with {, have some content and will end with }.
+map: LCURLY_BRACKET {
+ // This code is executed when we're about to start parsing
+ // the content of the map
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.push_back(m);
+} map_content RCURLY_BRACKET {
+ // map parsing completed. If we ever want to do any wrap up
+ // (maybe some sanity checking), this would be the best place
+ // for it.
+};
+
+map_value: map { $$ = ctx.stack_.back(); ctx.stack_.pop_back(); };
+
+// Rule for map content. In some cases it is allowed to have an empty map,
+// so we should say that explicitly. In most cases, though, there will
+// be some actual content inside. That's defined by not_empty_map
+map_content: %empty // empty map
+ | not_empty_map
+ ;
+
+// Rule for content of the map. It can have one of two formats:
+// 1) string: value
+// 2) not_empty_map , string: value
+// The first case covers a single entry, while the second case
+// covers all longer lists recursively.
+not_empty_map: STRING COLON value {
+ // map containing a single entry
+ ctx.stack_.back()->set($1, $3);
+ }
+ | not_empty_map COMMA STRING COLON value {
+ // map consisting of a shorter map followed by
+ // comma and string:value
+ ctx.stack_.back()->set($3, $5);
+ }
+ ;
+
+list_generic: LSQUARE_BRACKET {
+ ElementPtr l(new ListElement(ctx.loc2pos(@1)));
+ ctx.stack_.push_back(l);
+} list_content RSQUARE_BRACKET {
+};
+
+list_content: %empty // Empty list
+ | not_empty_list
+ ;
+
+not_empty_list: value {
+ // List consisting of a single element.
+ ctx.stack_.back()->add($1);
+ }
+ | not_empty_list COMMA value {
+ // List ending with , and a value.
+ ctx.stack_.back()->add($3);
+ }
+ ;
+
+// --- generic JSON parser ends here -------------------------------------------
+
+// --- syntax checking parser starts here --------------------------------------
+
+// Unknown keyword in a map. This clever rule can be added to any map
+// if you want to have a nice expression printed when unknown (mistyped?)
+// parameter is found.
+unknown_map_entry: STRING COLON {
+ const std::string& where = ctx.contextName();
+ const std::string& keyword = $1;
+ error(@1,
+ "got unexpected keyword \"" + keyword + "\" in " + where + " map.");
+};
+
+// This defines the top-level { } that holds Netconf, Dhcp6, Dhcp4,
+// DhcpDdns, Control-agent or Logging objects.
+netconf_syntax_map: LCURLY_BRACKET {
+ // This code is executed when we're about to start parsing
+ // the content of the map
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.push_back(m);
+} global_objects RCURLY_BRACKET {
+ // map parsing completed. If we ever want to do any wrap up
+ // (maybe some sanity checking), this would be the best place
+ // for it.
+};
+
+// This represents the top-level entries: Netconf, Logging and possibly others.
+global_objects: global_object
+ | global_objects COMMA global_object
+ ;
+
+// This represents a single top level entry, e.g. Netconf, Dhcp6 or DhcpDdns.
+global_object: netconf_object
+ | logging_object
+ | dhcp4_json_object
+ | dhcp6_json_object
+ | dhcpddns_json_object
+ | control_agent_object
+ | unknown_map_entry
+ ;
+
+// This defines the Netconf object.
+netconf_object: NETCONF {
+
+ // Let's create a MapElement that will represent it, add it to the
+ // top level map (that's already on the stack) and put the new map
+ // on the stack as well, so child elements will be able to add
+ // themselves to it.
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("Netconf", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.NETCONF);
+} COLON LCURLY_BRACKET global_params RCURLY_BRACKET {
+ // Ok, we're done with parsing the Netconf object. Let's take the map
+ // off the stack.
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+global_params: global_param
+ | global_params COMMA global_param
+ ;
+
+// These are the parameters that are allowed in the top-level for
+// Netconf.
+global_param: managed_servers
+ | hooks_libraries
+ | user_context
+ | comment
+ | unknown_map_entry
+ ;
+
+user_context: USER_CONTEXT {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON map_value {
+ ElementPtr parent = ctx.stack_.back();
+ ElementPtr user_context = $4;
+ ConstElementPtr old = parent->get("user-context");
+
+ // Handle already existing user context
+ if (old) {
+ // Check if it was a comment or a duplicate
+ if ((old->size() != 1) || !old->contains("comment")) {
+ std::stringstream msg;
+ msg << "duplicate user-context entries (previous at "
+ << old->getPosition().str() << ")";
+ error(@1, msg.str());
+ }
+ // Merge the comment
+ user_context->set("comment", old->get("comment"));
+ }
+
+ // Set the user context
+ parent->set("user-context", user_context);
+ ctx.leave();
+};
+
+comment: COMMENT {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr parent = ctx.stack_.back();
+ ElementPtr user_context(new MapElement(ctx.loc2pos(@1)));
+ ElementPtr comment(new StringElement($4, ctx.loc2pos(@4)));
+ user_context->set("comment", comment);
+
+ // Handle already existing user context
+ ConstElementPtr old = parent->get("user-context");
+ if (old) {
+ // Check for duplicate comment
+ if (old->contains("comment")) {
+ std::stringstream msg;
+ msg << "duplicate user-context/comment entries (previous at "
+ << old->getPosition().str() << ")";
+ error(@1, msg.str());
+ }
+ // Merge the user context in the comment
+ merge(user_context, old);
+ }
+
+ // Set the user context
+ parent->set("user-context", user_context);
+ ctx.leave();
+};
+
+// --- hooks-libraries ---------------------------------------------------------
+hooks_libraries: HOOKS_LIBRARIES {
+ ElementPtr l(new ListElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("hooks-libraries", l);
+ ctx.stack_.push_back(l);
+ ctx.enter(ctx.HOOKS_LIBRARIES);
+} COLON LSQUARE_BRACKET hooks_libraries_list RSQUARE_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+hooks_libraries_list: %empty
+ | not_empty_hooks_libraries_list
+ ;
+
+not_empty_hooks_libraries_list: hooks_library
+ | not_empty_hooks_libraries_list COMMA hooks_library
+ ;
+
+hooks_library: LCURLY_BRACKET {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->add(m);
+ ctx.stack_.push_back(m);
+} hooks_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+};
+
+hooks_params: hooks_param
+ | hooks_params COMMA hooks_param
+ | unknown_map_entry
+ ;
+
+hooks_param: library
+ | parameters
+ ;
+
+library: LIBRARY {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr lib(new StringElement($4, ctx.loc2pos(@4)));
+ ctx.stack_.back()->set("library", lib);
+ ctx.leave();
+};
+
+parameters: PARAMETERS {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON value {
+ ctx.stack_.back()->set("parameters", $4);
+ ctx.leave();
+};
+
+// --- hooks-libraries end here ------------------------------------------------
+
+// --- managed-servers starts here ----------------------------------------------
+managed_servers: MANAGED_SERVERS COLON LCURLY_BRACKET {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("managed-servers", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.MANAGED_SERVERS);
+} servers_entries RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+servers_entries: %empty
+ | not_empty_servers_entries
+ ;
+
+not_empty_servers_entries: server_entry
+ | not_empty_servers_entries COMMA server_entry
+ ;
+
+
+// We currently support four types of servers: DHCPv4, DHCPv6, D2 and CA
+// (even though D2 socket support is not yet merged).
+server_entry: dhcp4_server
+ | dhcp6_server
+ | d2_server
+ | ca_server
+ | unknown_map_entry
+ ;
+
+// That's an entry for dhcp4 server.
+dhcp4_server: DHCP4_SERVER {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("dhcp4", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.SERVER);
+} COLON LCURLY_BRACKET managed_server_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// That's an entry for dhcp6 server.
+dhcp6_server: DHCP6_SERVER {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("dhcp6", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.SERVER);
+} COLON LCURLY_BRACKET managed_server_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// That's an entry for d2 server.
+d2_server: D2_SERVER {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("d2", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.SERVER);
+} COLON LCURLY_BRACKET managed_server_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// That's an entry for ca server.
+ca_server: CA_SERVER {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("ca", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.SERVER);
+} COLON LCURLY_BRACKET managed_server_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// Server parameters consist of one or more parameters.
+managed_server_params: managed_server_param
+ | managed_server_params COMMA managed_server_param
+ ;
+
+// We currently support two server parameters: model and control-socket.
+managed_server_param: model
+ | control_socket
+ | user_context
+ | comment
+ | unknown_map_entry
+ ;
+
+// YANG model
+model: MODEL {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr model(new StringElement($4, ctx.loc2pos(@4)));
+ ctx.stack_.back()->set("model", model);
+ ctx.leave();
+};
+
+// Control socket.
+control_socket: CONTROL_SOCKET {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("control-socket", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.CONTROL_SOCKET);
+} COLON LCURLY_BRACKET control_socket_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// control-socket parameters
+control_socket_params: control_socket_param
+ | control_socket_params COMMA control_socket_param
+ ;
+
+control_socket_param: socket_type
+ | name
+ | host
+ | port
+ | user_context
+ | comment
+ | unknown_map_entry
+ ;
+
+socket_type: TYPE {
+ ctx.enter(ctx.SOCKET_TYPE);
+} COLON socket_type_value {
+ ctx.stack_.back()->set("type", $4);
+ ctx.leave();
+};
+
+// We currently allow unix, http and stdout control socket types.
+socket_type_value : UNIX { $$ = ElementPtr(new StringElement("unix", ctx.loc2pos(@1))); }
+ | HTTP { $$ = ElementPtr(new StringElement("http", ctx.loc2pos(@1))); }
+ | STDOUT { $$ = ElementPtr(new StringElement("stdout", ctx.loc2pos(@1))); }
+ ;
+// Unix name.
+name: NAME {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr name(new StringElement($4, ctx.loc2pos(@4)));
+ ctx.stack_.back()->set("name", name);
+ ctx.leave();
+};
+
+// HTTP host.
+host: HOST {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr host(new StringElement($4, ctx.loc2pos(@4)));
+ ctx.stack_.back()->set("host", host);
+ ctx.leave();
+};
+
+// HTTP PORT
+port: PORT COLON INTEGER {
+ ElementPtr port(new IntElement($3, ctx.loc2pos(@3)));
+ ctx.stack_.back()->set("port", port);
+};
+
+// --- managed-servers end here ------------------------------------------------
+
+// JSON entries for other global objects (Dhcp4,Dhcp6 and DhcpDdns)
+dhcp4_json_object: DHCP4 {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON value {
+ ctx.stack_.back()->set("Dhcp4", $4);
+ ctx.leave();
+};
+
+dhcp6_json_object: DHCP6 {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON value {
+ ctx.stack_.back()->set("Dhcp6", $4);
+ ctx.leave();
+};
+
+dhcpddns_json_object: DHCPDDNS {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON value {
+ ctx.stack_.back()->set("DhcpDdns", $4);
+ ctx.leave();
+};
+
+control_agent_object: CONTROL_AGENT {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON value {
+ ctx.stack_.back()->set("Control-agent", $4);
+ ctx.leave();
+};
+
+
+// --- Logging starts here -----------------------------------------------------
+
+// This defines the top level "Logging" object. It parses
+// the following "Logging": { ... }. The ... is defined
+// by logging_params
+logging_object: LOGGING {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("Logging", m);
+ ctx.stack_.push_back(m);
+ ctx.enter(ctx.LOGGING);
+} COLON LCURLY_BRACKET logging_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// This defines the list of allowed parameters that may appear
+// in the top-level Logging object. It can either be a single
+// parameter or several parameters separated by commas.
+logging_params: logging_param
+ | logging_params COMMA logging_param
+ ;
+
+// There's currently only one parameter defined, which is "loggers".
+logging_param: loggers;
+
+// "loggers", the only parameter currently defined in "Logging" object,
+// is "Loggers": [ ... ].
+loggers: LOGGERS {
+ ElementPtr l(new ListElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("loggers", l);
+ ctx.stack_.push_back(l);
+ ctx.enter(ctx.LOGGERS);
+} COLON LSQUARE_BRACKET loggers_entries RSQUARE_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+// These are the parameters allowed in loggers: either one logger
+// entry or multiple entries separated by commas.
+loggers_entries: logger_entry
+ | loggers_entries COMMA logger_entry
+ ;
+
+// This defines a single entry in the loggers list in Logging.
+logger_entry: LCURLY_BRACKET {
+ ElementPtr l(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->add(l);
+ ctx.stack_.push_back(l);
+} logger_params RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+};
+
+logger_params: logger_param
+ | logger_params COMMA logger_param
+ ;
+
+logger_param: name
+ | output_options_list
+ | debuglevel
+ | severity
+ | user_context
+ | comment
+ | unknown_map_entry
+ ;
+
+debuglevel: DEBUGLEVEL COLON INTEGER {
+ ElementPtr dl(new IntElement($3, ctx.loc2pos(@3)));
+ ctx.stack_.back()->set("debuglevel", dl);
+};
+
+severity: SEVERITY {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr sev(new StringElement($4, ctx.loc2pos(@4)));
+ ctx.stack_.back()->set("severity", sev);
+ ctx.leave();
+};
+
+output_options_list: OUTPUT_OPTIONS {
+ ElementPtr l(new ListElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->set("output_options", l);
+ ctx.stack_.push_back(l);
+ ctx.enter(ctx.OUTPUT_OPTIONS);
+} COLON LSQUARE_BRACKET output_options_list_content RSQUARE_BRACKET {
+ ctx.stack_.pop_back();
+ ctx.leave();
+};
+
+output_options_list_content: output_entry
+ | output_options_list_content COMMA output_entry
+ ;
+
+output_entry: LCURLY_BRACKET {
+ ElementPtr m(new MapElement(ctx.loc2pos(@1)));
+ ctx.stack_.back()->add(m);
+ ctx.stack_.push_back(m);
+} output_params_list RCURLY_BRACKET {
+ ctx.stack_.pop_back();
+};
+
+output_params_list: output_params
+ | output_params_list COMMA output_params
+ ;
+
+output_params: output
+ | flush
+ | maxsize
+ | maxver
+ ;
+
+output: OUTPUT {
+ ctx.enter(ctx.NO_KEYWORDS);
+} COLON STRING {
+ ElementPtr sev(new StringElement($4, ctx.loc2pos(@4)));
+ ctx.stack_.back()->set("output", sev);
+ ctx.leave();
+};
+
+flush: FLUSH COLON BOOLEAN {
+ ElementPtr flush(new BoolElement($3, ctx.loc2pos(@3)));
+ ctx.stack_.back()->set("flush", flush);
+};
+
+maxsize: MAXSIZE COLON INTEGER {
+ ElementPtr maxsize(new IntElement($3, ctx.loc2pos(@3)));
+ ctx.stack_.back()->set("maxsize", maxsize);
+};
+
+maxver: MAXVER COLON INTEGER {
+ ElementPtr maxver(new IntElement($3, ctx.loc2pos(@3)));
+ ctx.stack_.back()->set("maxver", maxver);
+};
+
+%%
+
+void
+isc::netconf::NetconfParser::error(const location_type& loc,
+ const std::string& what)
+{
+ ctx.error(loc, what);
+}
--- /dev/null
+// Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include <config.h>
+
+#include <netconf/parser_context.h>
+#include <netconf/netconf_parser.h>
+#include <exceptions/exceptions.h>
+//#include <cc/dhcp_config_error.h>
+#include <cc/data.h>
+#include <fstream>
+#include <limits>
+
+namespace isc {
+namespace netconf {
+
+ParserContext::ParserContext()
+ : sfile_(0), ctx_(NO_KEYWORDS), trace_scanning_(false), trace_parsing_(false)
+{
+}
+
+ParserContext::~ParserContext()
+{
+}
+
+isc::data::ElementPtr
+ParserContext::parseString(const std::string& str, ParserType parser_type)
+{
+ scanStringBegin(str, parser_type);
+ return (parseCommon());
+}
+
+isc::data::ElementPtr
+ParserContext::parseFile(const std::string& filename, ParserType parser_type) {
+ FILE* f = fopen(filename.c_str(), "r");
+ if (!f) {
+ isc_throw(ParseError, "Unable to open file " << filename);
+ }
+ scanFileBegin(f, filename, parser_type);
+ return (parseCommon());
+}
+
+isc::data::ElementPtr
+ParserContext::parseCommon() {
+ isc::netconf::NetconfParser parser(*this);
+ // Uncomment this to get detailed parser logs.
+ // trace_parsing_ = true;
+ parser.set_debug_level(trace_parsing_);
+ try {
+ int res = parser.parse();
+ if (res != 0) {
+ isc_throw(ParseError, "Parser abort");
+ }
+ scanEnd();
+ }
+ catch (...) {
+ scanEnd();
+ throw;
+ }
+ if (stack_.size() == 1) {
+ return (stack_[0]);
+ } else {
+ isc_throw(ParseError, "Expected exactly one terminal Element expected, found "
+ << stack_.size());
+ }
+}
+
+
+void
+ParserContext::error(const isc::netconf::location& loc, const std::string& what)
+{
+ isc_throw(ParseError, loc << ": " << what);
+}
+
+void
+ParserContext::error(const std::string& what)
+{
+ isc_throw(ParseError, what);
+}
+
+void
+ParserContext::fatal(const std::string& what)
+{
+ isc_throw(ParseError, what);
+}
+
+isc::data::Element::Position
+ParserContext::loc2pos(isc::netconf::location& loc)
+{
+ const std::string& file = *loc.begin.filename;
+ const uint32_t line = loc.begin.line;
+ const uint32_t pos = loc.begin.column;
+ return (isc::data::Element::Position(file, line, pos));
+}
+
+void
+ParserContext::require(const std::string& name,
+ isc::data::Element::Position open_loc,
+ isc::data::Element::Position close_loc)
+{
+ ConstElementPtr value = stack_.back()->get(name);
+ if (!value) {
+ isc_throw(ParseError,
+ "missing parameter '" << name << "' ("
+ << stack_.back()->getPosition() << ") ["
+ << contextName() << " map between "
+ << open_loc << " and " << close_loc << "]");
+ }
+}
+
+
+void
+ParserContext::enter(const LexerContext& ctx)
+{
+ cstack_.push_back(ctx_);
+ ctx_ = ctx;
+}
+
+void
+ParserContext::leave()
+{
+ if (cstack_.empty()) {
+ fatal("unbalanced syntactic context");
+ }
+ ctx_ = cstack_.back();
+ cstack_.pop_back();
+}
+
+const std::string
+ParserContext::contextName()
+{
+ switch (ctx_) {
+ case NO_KEYWORDS:
+ return ("__no keywords__");
+ case CONFIG:
+ return ("toplevel");
+ case NETCONF:
+ return ("Netconf");
+ case LOGGING:
+ return ("Logging");
+ case MANAGED_SERVERS:
+ return ("managed-servers");
+ case SERVER:
+ return ("[managed-]server");
+ case CONTROL_SOCKET:
+ return ("control-socket");
+ case SOCKET_TYPE:
+ return ("[socket-]type");
+ case HOOKS_LIBRARIES:
+ return ("hooks-libraries");
+ case LOGGERS:
+ return ("loggers");
+ case OUTPUT_OPTIONS:
+ return ("output-options");
+ default:
+ return ("__unknown__");
+ }
+}
+
+}; // end of isc::netconf namespace
+}; // end of isc namespace
--- /dev/null
+// Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef NETCONF_PARSER_CONTEXT_H
+#define NETCONF_PARSER_CONTEXT_H
+#include <string>
+#include <map>
+#include <vector>
+#include <netconf/netconf_parser.h>
+#include <netconf/parser_context_decl.h>
+#include <cc/dhcp_config_error.h>
+#include <exceptions/exceptions.h>
+
+// Tell Flex the lexer's prototype ...
+#define YY_DECL isc::netconf::NetconfParser::symbol_type netconf_lex (ParserContext& driver)
+
+// ... and declare it for the parser's sake.
+YY_DECL;
+
+namespace isc {
+namespace netconf {
+
+/// @brief Parser context is a wrapper around flex/bison instances dedicated
+/// to the Netconf agent config file parser.
+///
+/// It follows the same principle as other components. The primary interface
+/// consists of the @ref parseString and @ref parseFile methods. All other
+/// methods are public for testing purposes only. This interface allows
+/// parsing the whole configuration with syntactic checking (which is by far
+/// the most frequent use), but it also allows parsing the input as generic
+/// JSON or parsing only the content of the Netconf object, which is a subset
+/// of the full grammar (this is very useful for unit tests, as they do not
+/// need to duplicate unnecessary parts of the config file).
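+///
+/// A minimal usage sketch (the file name below is only an example):
+/// @code
+///     ParserContext ctx;
+///     isc::data::ElementPtr config =
+///         ctx.parseFile("kea-netconf.conf", ParserContext::PARSER_NETCONF);
+/// @endcode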
+class ParserContext
+{
+public:
+
+ /// @brief Defines currently supported scopes
+ ///
+ /// NetconfParser is able to parse several types of scope. Usually,
+ /// when it parses a config file, it expects the data to have a map
+ /// with Netconf-agent in it and all the parameters within that map.
+ /// However, sometimes the parser is expected to parse only a subset
+ /// of that information.
+ typedef enum {
+ /// This parser will parse the content as generic JSON.
+ PARSER_JSON,
+
+ /// This parser will expect the content as Netconf config wrapped
+ /// in a map (that's the regular config file)
+ PARSER_NETCONF,
+
+ /// This parser will expect only the content of Netconf.
+ PARSER_SUB_NETCONF
+ } ParserType;
+
+ /// @brief Default constructor.
+ ParserContext();
+
+ /// @brief destructor
+ virtual ~ParserContext();
+
+ /// @brief JSON elements being parsed.
+ std::vector<isc::data::ElementPtr> stack_;
+
+ /// @brief Method called before scanning starts on a string.
+ ///
+ /// @param str string to be parsed
+ /// @param type specifies expected content
+ void scanStringBegin(const std::string& str, ParserType type);
+
+ /// @brief Method called before scanning starts on a file.
+ ///
+ /// @param f stdio FILE pointer
+ /// @param filename file to be parsed
+ /// @param type specifies expected content
+ void scanFileBegin(FILE* f, const std::string& filename, ParserType type);
+
+ /// @brief Method called after the last tokens are scanned.
+ void scanEnd();
+
+ /// @brief Divert input to an include file.
+ ///
+ /// @param filename file to be included
+ void includeFile(const std::string& filename);
+
+ /// @brief Run the parser on the string specified.
+ ///
+ /// This method parses specified string. Depending on the value of
+ /// parser_type, parser may either check only that the input is valid
+ /// JSON, or may do more specific syntax checking. See @ref ParserType
+ /// for supported syntax checkers.
+ ///
+ /// @param str string to be parsed
+ /// @param parser_type specifies expected content (usually NETCONF or generic JSON)
+ /// @return Element structure representing parsed text.
+ isc::data::ElementPtr parseString(const std::string& str,
+ ParserType parser_type);
+
+ /// @brief Run the parser on the file specified.
+ ///
+ /// This method parses specified file. Depending on the value of
+ /// parser_type, parser may either check only that the input is valid
+ /// JSON, or may do more specific syntax checking. See @ref ParserType
+ /// for supported syntax checkers.
+ ///
+ /// @param filename file to be parsed
+ /// @param parser_type specifies expected content (usually PARSER_NETCONF
+ /// or PARSER_JSON)
+ /// @return Element structure representing parsed text.
+ isc::data::ElementPtr parseFile(const std::string& filename,
+ ParserType parser_type);
+
+ /// @brief Error handler
+ ///
+ /// @param loc location within the parsed file when experienced a problem.
+ /// @param what string explaining the nature of the error.
+ /// @throw ParseError
+ void error(const isc::netconf::location& loc, const std::string& what);
+
+ /// @brief Error handler
+ ///
+ /// This is a simplified error reporting tool for possible future
+ /// cases when the NetconfParser is not able to handle the input.
+ ///
+ /// @param what string explaining the nature of the error.
+ /// @throw ParseError
+ void error(const std::string& what);
+
+ /// @brief Fatal error handler
+ ///
+ /// This is for fatal errors that should not happen.
+ /// Used by YY_FATAL_ERROR macro so required to be static.
+ ///
+ /// @param what string explaining the nature of the error.
+ /// @throw ParseError
+ static void fatal(const std::string& what);
+
+ /// @brief Converts bison's position to one understandable by isc::data::Element
+ ///
+ /// Convert a bison location into an element position
+ /// (take the begin, the end is lost)
+ ///
+ /// @param loc location in bison format
+ /// @return Position in format accepted by Element
+ isc::data::Element::Position loc2pos(isc::netconf::location& loc);
+
+ /// @brief Check if a required parameter is present
+ ///
+ /// Check if a required parameter is present in the map at the top
+ /// of the stack and raise an error when it is not.
+ ///
+ /// @param name name of the parameter to check
+ /// @param open_loc location of the opening curly bracket
+ /// @param close_loc location of the closing curly bracket
+ /// @throw ParseError
+ void require(const std::string& name,
+ isc::data::Element::Position open_loc,
+ isc::data::Element::Position close_loc);
+
+ /// @brief Defines syntactic contexts for lexical tie-ins
+ typedef enum {
+ ///< This one is used in pure JSON mode.
+ NO_KEYWORDS,
+
+ ///< Used while parsing top level (that contains Netconf, Logging and others)
+ CONFIG,
+
+ ///< Used while parsing content of Netconf.
+ NETCONF,
+
+ ///< Used while parsing content of Logging.
+ LOGGING,
+
+ ///< Used while parsing Netconf/managed-servers.
+ MANAGED_SERVERS,
+
+ ///< Used while parsing Netconf/managed-servers/*.
+ SERVER,
+
+ ///< Used while parsing Netconf/managed-servers/*/control-socket.
+ CONTROL_SOCKET,
+
+ ///< Used while parsing Netconf/managed-servers/*/control-socket/type.
+ SOCKET_TYPE,
+
+ ///< Used while parsing Netconf/hooks-libraries.
+ HOOKS_LIBRARIES,
+
+ ///< Used while parsing Logging/loggers structures.
+ LOGGERS,
+
+ ///< Used while parsing Logging/loggers/output_options structures.
+ OUTPUT_OPTIONS
+
+ } LexerContext;
+
+ /// @brief File name
+ std::string file_;
+
+ /// @brief File name stack
+ std::vector<std::string> files_;
+
+ /// @brief Location of the current token
+ ///
+ /// The lexer will keep updating it. This variable will be useful
+ /// for logging errors.
+ isc::netconf::location loc_;
+
+ /// @brief Location stack
+ std::vector<isc::netconf::location> locs_;
+
+ /// @brief Lexer state stack
+ std::vector<struct yy_buffer_state*> states_;
+
+ /// @brief The currently parsed file (stdio FILE pointer)
+ FILE* sfile_;
+
+ /// @brief Stack of the open stdio FILE pointers
+ ///
+ /// This is a stack of files. Typically there's only one file (the
+ /// one being currently parsed), but there may be more if one
+ /// file includes another.
+ std::vector<FILE*> sfiles_;
+
+ /// @brief Current syntactic context
+ LexerContext ctx_;
+
+ /// @brief Enter a new syntactic context
+ ///
+ /// Entering a new syntactic context is useful in several ways.
+ /// First, it allows the parser to avoid conflicts. Second, it
+ /// allows the lexer to return different tokens depending on
+ /// context (e.g. if a "renew-timer" string is detected, the lexer
+ /// will return a STRING token in JSON mode, but a RENEW_TIMER token
+ /// in DHCP6 mode). Finally, the syntactic context allows the
+ /// error message to be more descriptive if the input string
+ /// does not parse properly. The Netconf parser uses the contexts
+ /// defined in @ref LexerContext; keywords are only recognized
+ /// outside of the NO_KEYWORDS (pure JSON) context.
+ ///
+ /// Make sure to call @ref leave() once the parsing of your
+ /// context is complete.
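+ ///
+ /// A minimal sketch of the usual pairing (the context chosen here is
+ /// only an example):
+ /// @code
+ /// ctx.enter(ParserContext::CONTROL_SOCKET);
+ /// // ... parse the content of the control-socket map ...
+ /// ctx.leave();
+ /// @endcode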
+ ///
+ /// @param ctx the syntactic context to enter into
+ void enter(const LexerContext& ctx);
+
+ /// @brief Leave a syntactic context
+ ///
+ /// @ref enter() must have been called earlier (when entering a new
+ /// scope or context). Once the parsing of that context is complete,
+ /// this method should be called.
+ ///
+ /// @throw isc::Unexpected if unbalanced (more leave() than enter() calls)
+ void leave();
+
+ /// @brief Get the syntactic context name
+ ///
+ /// @return printable name of the context.
+ const std::string contextName();
+
+ private:
+ /// @brief Flag determining scanner debugging.
+ bool trace_scanning_;
+
+ /// @brief Flag determining parser debugging.
+ bool trace_parsing_;
+
+ /// @brief Syntactic context stack
+ std::vector<LexerContext> cstack_;
+
+ /// @brief Common part of parseXXX
+ ///
+ /// @return Element structure representing parsed text.
+ isc::data::ElementPtr parseCommon();
+};
+
+}; // end of isc::netconf namespace
+}; // end of isc namespace
+
+#endif
--- /dev/null
+// Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef NETCONF_CONTEXT_DECL_H
+#define NETCONF_CONTEXT_DECL_H
+
+/// @file netconf/parser_context_decl.h Forward declaration of the ParserContext class
+
+namespace isc {
+namespace netconf {
+
+class ParserContext;
+
+}; // end of isc::netconf namespace
+}; // end of isc namespace
+
+#endif
AM_CPPFLAGS += -I$(top_srcdir)/src -I$(top_builddir)/src
AM_CPPFLAGS += -I$(top_srcdir)/src/bin -I$(top_builddir)/src/bin
AM_CPPFLAGS += $(BOOST_INCLUDES)
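+# CFG_EXAMPLES points the unit tests at the Netconf example configs
+# shipped in doc/examples/netconf.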
+AM_CPPFLAGS += -DCFG_EXAMPLES=\"$(abs_top_srcdir)/doc/examples/netconf\"
CLEANFILES = *.json *.log
TESTS += netconf_unittests
netconf_unittests_SOURCES = run_unittests.cc
-netconf_unittests_SOURCES += netconf_env_unittest.cc
+netconf_unittests_SOURCES += parser_unittests.cc
netconf_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
netconf_unittests_LDFLAGS = $(AM_LDFLAGS) $(CRYPTO_LDFLAGS)
netconf_unittests_LDADD = $(top_builddir)/src/bin/netconf/libnetconf.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/cfgrpt/libcfgrpt.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/http/libkea-http.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/process/libkea-process.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/cfgrpt/libcfgrpt.la
#netconf_unittests_LDADD += $(top_builddir)/src/lib/dhcpsrv/libkea-dhcpsrv.la
#netconf_unittests_LDADD += $(top_builddir)/src/lib/dhcpsrv/testutils/libdhcpsrvtest.la
#netconf_unittests_LDADD += $(top_builddir)/src/lib/eval/libkea-eval.la
#netconf_unittests_LDADD += $(top_builddir)/src/lib/dhcp_ddns/libkea-dhcp_ddns.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/testutils/libkea-testutils.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/stats/libkea-stats.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/config/libkea-cfgclient.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libkea-dhcp++.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/yang/libkea-yang.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/stats/libkea-stats.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/config/libkea-cfgclient.la
#netconf_unittests_LDADD += $(top_builddir)/src/lib/dhcp/tests/libdhcptest.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libkea-asiolink.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/cc/libkea-cc.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/dns/libkea-dns++.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libkea-cryptolink.la
-#netconf_unittests_LDADD += $(top_builddir)/src/lib/hooks/libkea-hooks.la
-netconf_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libkea-dhcp++.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/hooks/libkea-hooks.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/testutils/libkea-testutils.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/cc/libkea-cc.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libkea-asiolink.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/dns/libkea-dns++.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libkea-cryptolink.la
netconf_unittests_LDADD += $(top_builddir)/src/lib/log/libkea-log.la
+netconf_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
netconf_unittests_LDADD += $(top_builddir)/src/lib/util/threads/libkea-threads.la
netconf_unittests_LDADD += $(top_builddir)/src/lib/util/libkea-util.la
netconf_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libkea-exceptions.la
+++ /dev/null
-// Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-/// @brief Basic environment tests for netconf
-
-
-#include <sysrepo-cpp/Session.h>
-#include <gtest/gtest.h>
-
-class NetconfTest : public ::testing::Test {
-public:
-
- NetconfTest() {}
-
-};
-
-// This test checks if a session to sysrepo can be established.
-// It uses sysrepo code directly, so it's a sanity check for the
-// environment. Doesn't test any Kea code.
-TEST_F(NetconfTest, environmentSanity) {
-
- S_Connection conn;
-
- ASSERT_NO_THROW(conn.reset(new Connection("kea-netconf")));
-
- S_Session sess;
-
- ASSERT_NO_THROW(sess.reset(new Session(conn)));
-}
--- /dev/null
+// Copyright (C) 2018 Internet Systems Consortium, Inc. ("ISC")
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include <config.h>
+
+#include <gtest/gtest.h>
+#include <cc/data.h>
+#include <netconf/parser_context.h>
+#include <cc/dhcp_config_error.h>
+#include <testutils/io_utils.h>
+#include <testutils/user_context_utils.h>
+
+using namespace isc::data;
+using namespace isc::test;
+using namespace std;
+
+namespace isc {
+namespace netconf {
+namespace test {
+
+/// @brief compares two JSON trees
+///
+/// If differences are discovered, a gtest failure is reported (using EXPECT_EQ).
+///
+/// @param a first element to be compared
+/// @param b second element to be compared
+void compareJSON(ConstElementPtr a, ConstElementPtr b) {
+ ASSERT_TRUE(a);
+ ASSERT_TRUE(b);
+ EXPECT_EQ(a->str(), b->str());
+}
+
+/// @brief Tests if the input string can be parsed with specific parser
+///
+/// The input text will be passed to the bison parser of the specified type.
+/// Then the same input text is passed to the legacy JSON parser and the
+/// outputs from both parsers are compared. The legacy comparison can be
+/// disabled if the tested feature is not supported by the old parser
+/// (e.g. new comment styles).
+///
+/// @param txt text to be parsed
+/// @param parser_type bison parser type to be instantiated
+/// @param compare whether to compare the output with legacy JSON parser
+void testParser(const std::string& txt, ParserContext::ParserType parser_type,
+ bool compare = true) {
+ ConstElementPtr test_json;
+
+ ASSERT_NO_THROW({
+ try {
+ ParserContext ctx;
+ test_json = ctx.parseString(txt, parser_type);
+ } catch (const std::exception &e) {
+ cout << "EXCEPTION: " << e.what() << endl;
+ throw;
+ }
+
+ });
+
+ if (!compare) {
+ return;
+ }
+
+ // Now compare if both representations are the same.
+ ElementPtr reference_json;
+ ASSERT_NO_THROW(reference_json = Element::fromJSON(txt, true));
+ compareJSON(reference_json, test_json);
+}
+
+TEST(ParserTest, mapInMap) {
+ string txt = "{ \"xyzzy\": { \"foo\": 123, \"baz\": 456 } }";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, listInList) {
+ string txt = "[ [ \"Britain\", \"Wales\", \"Scotland\" ], "
+ "[ \"Pomorze\", \"Wielkopolska\", \"Tatry\"] ]";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, nestedMaps) {
+ string txt = "{ \"europe\": { \"UK\": { \"London\": { \"street\": \"221B Baker\" }}}}";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, nestedLists) {
+ string txt = "[ \"half\", [ \"quarter\", [ \"eighth\", [ \"sixteenth\" ]]]]";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, listsInMaps) {
+ string txt = "{ \"constellations\": { \"orion\": [ \"rigel\", \"betelgeuse\" ], "
+ "\"cygnus\": [ \"deneb\", \"albireo\"] } }";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, mapsInLists) {
+ string txt = "[ { \"body\": \"earth\", \"gravity\": 1.0 },"
+ " { \"body\": \"mars\", \"gravity\": 0.376 } ]";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, types) {
+ string txt = "{ \"string\": \"foo\","
+ "\"integer\": 42,"
+ "\"boolean\": true,"
+ "\"map\": { \"foo\": \"bar\" },"
+ "\"list\": [ 1, 2, 3 ],"
+ "\"null\": null }";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+TEST(ParserTest, keywordJSON) {
+ string txt = "{ \"name\": \"user\","
+ "\"type\": \"password\","
+ "\"user\": \"name\","
+ "\"password\": \"type\" }";
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+// This test checks that the DhcpDdns configuration is accepted
+// by the parser.
+TEST(ParserTest, keywordDhcpDdns) {
+ string txt =
+ "{ \"DhcpDdns\" : \n"
+ "{ \n"
+ " \"ip-address\": \"192.168.77.1\", \n"
+ " \"port\": 777 , \n "
+ " \"ncr-protocol\": \"UDP\", \n"
+ "\"tsig-keys\": [], \n"
+ "\"forward-ddns\" : {}, \n"
+ "\"reverse-ddns\" : {} \n"
+ "} \n"
+ "} \n";
+ testParser(txt, ParserContext::PARSER_NETCONF);
+}
+
+// This test checks that the Dhcp6 configuration is accepted
+// by the parser.
+TEST(ParserTest, keywordDhcp6) {
+ string txt = "{ \"Dhcp6\": { \"interfaces-config\": {"
+ " \"interfaces\": [ \"type\", \"htype\" ] },\n"
+ "\"preferred-lifetime\": 3000,\n"
+ "\"rebind-timer\": 2000, \n"
+ "\"renew-timer\": 1000, \n"
+ "\"subnet6\": [ { "
+ " \"pools\": [ { \"pool\": \"2001:db8:1::/64\" } ],"
+ " \"subnet\": \"2001:db8:1::/48\", "
+ " \"interface\": \"test\" } ],\n"
+ "\"valid-lifetime\": 4000 } }";
+ testParser(txt, ParserContext::PARSER_NETCONF);
+}
+
+// This test checks that the Dhcp4 configuration is accepted
+// by the parser.
+TEST(ParserTest, keywordDhcp4) {
+ string txt = "{ \"Dhcp4\": { \"interfaces-config\": {"
+ " \"interfaces\": [ \"type\", \"htype\" ] },\n"
+ "\"rebind-timer\": 2000, \n"
+ "\"renew-timer\": 1000, \n"
+ "\"subnet4\": [ { "
+ " \"pools\": [ { \"pool\": \"192.0.2.1 - 192.0.2.100\" } ],"
+ " \"subnet\": \"192.0.2.0/24\", "
+ " \"interface\": \"test\" } ],\n"
+ "\"valid-lifetime\": 4000 } }";
+ testParser(txt, ParserContext::PARSER_NETCONF);
+}
+
+// This test checks that the Control-agent configuration is accepted
+// by the parser.
+TEST(ParserTest, keywordControlAgent) {
+ string txt = "{ \"Control-agent\": {\n"
+ " \"http-host\": \"localhost\",\n"
+ " \"http-port\": 8000,\n"
+ " \"control-sockets\": {"
+ " \"dhcp4\": {"
+ " \"socket-type\": \"unix\","
+ " \"socket-name\": \"/path/to/the/unix/socket-v4\""
+ " },"
+ " \"dhcp6\": {"
+ " \"socket-type\": \"unix\","
+ " \"socket-name\": \"/path/to/the/unix/socket-v6\""
+ " },"
+ " \"d2\": {"
+ " \"socket-type\": \"unix\","
+ " \"socket-name\": \"/path/to/the/unix/socket-d2\""
+ " }"
+ " }"
+ "} }";
+ testParser(txt, ParserContext::PARSER_NETCONF);
+}
+
+// This test checks if full config (with top level and Netconf objects) can
+// be parsed with syntactic checking (and as pure JSON).
+TEST(ParserTest, keywordNetconf) {
+ string txt = "{ \"Netconf\": {\n"
+ " \"managed-servers\": {"
+ " \"dhcp4\": {"
+ " \"model\": \"kea-dhcp4-server\","
+ " \"control-socket\": {"
+ " \"type\": \"unix\","
+ " \"name\": \"/path/to/the/unix/socket-v4\""
+ " }"
+ " },"
+ " \"dhcp6\": {"
+ " \"model\": \"kea-dhcp6-server\","
+ " \"control-socket\": {"
+ " \"type\": \"http\","
+ " \"host\": \"127.0.0.1\","
+ " \"port\": 12345"
+ " }"
+ " },"
+ " \"d2\": {"
+ " \"model\": \"kea-dhcp-ddns\","
+ " \"control-socket\": {"
+ " \"type\": \"stdout\""
+ " }"
+ " },"
+ " \"ca\": {"
+ " \"model\": \"kea-ctrl-agent\","
+ " \"control-socket\": {"
+ " \"type\": \"http\","
+ " \"user-context\": { \"use default\": true }"
+ " }"
+ " }"
+ " },"
+ " \"hooks-libraries\": ["
+ " {"
+ " \"library\": \"/opt/local/control-agent-commands.so\","
+ " \"parameters\": {"
+ " \"param1\": \"foo\""
+ " }"
+ " }"
+ " ]"
+ "} }";
+ // This is a full config, so we'll parse it as full config (PARSER_NETCONF)
+ testParser(txt, ParserContext::PARSER_NETCONF);
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+// This test checks if simplified config (without top level and Netconf
+// objects) can be parsed with syntactic checking (and as pure JSON).
+TEST(ParserTest, keywordSubNetconf) {
+
+ // This is similar to the previous test, but note the lack of the outer
+ // map and the "Netconf" entry.
+ string txt = "{\n"
+ " \"managed-servers\": {"
+ " \"dhcp4\": {"
+ " \"model\": \"kea-dhcp4-server\","
+ " \"control-socket\": {"
+ " \"type\": \"unix\","
+ " \"name\": \"/path/to/the/unix/socket-v4\""
+ " }"
+ " },"
+ " \"dhcp6\": {"
+ " \"model\": \"kea-dhcp6-server\","
+ " \"control-socket\": {"
+ " \"type\": \"http\","
+ " \"host\": \"127.0.0.1\","
+ " \"port\": 12345"
+ " }"
+ " },"
+ " \"d2\": {"
+ " \"model\": \"kea-dhcp-ddns\","
+ " \"control-socket\": {"
+ " \"type\": \"stdout\""
+ " }"
+ " },"
+ " \"ca\": {"
+ " \"model\": \"kea-ctrl-agent\","
+ " \"control-socket\": {"
+ " \"type\": \"http\","
+ " \"user-context\": { \"use default\": true }"
+ " }"
+ " }"
+ " },"
+ " \"hooks-libraries\": ["
+ " {"
+ " \"library\": \"/opt/local/control-agent-commands.so\","
+ " \"parameters\": {"
+ " \"param1\": \"foo\""
+ " }"
+ " }"
+ " ]"
+ "}";
+
+ // This is only a subset of full config, so we'll parse with PARSER_SUB_NETCONF.
+ testParser(txt, ParserContext::PARSER_SUB_NETCONF);
+ testParser(txt, ParserContext::PARSER_JSON);
+}
+
+// Tests if bash (#) comments are supported. That's the only comment type that
+// was supported by the old parser.
+TEST(ParserTest, bashComments) {
+ string txt= "{ \"Netconf\": {"
+ " \"managed-servers\": {\n"
+ " \"d2\": {\n"
+ " \"model\": \"foo\",\n"
+ " \"control-socket\": {\n"
+ "# this is a comment\n"
+ "\"type\": \"unix\", \n"
+ "# This socket is mine. I can name it whatever\n"
+ "# I like, ok?\n"
+ "\"name\": \"Hector\" \n"
+ "} } } } }";
+ testParser(txt, ParserContext::PARSER_NETCONF);
+}
+
+// Tests if C++ (//) comments can start anywhere, not just in the first line.
+TEST(ParserTest, cppComments) {
+ string txt= "{ \"Netconf\": { // the level is over 9000!\n"
+ " \"managed-servers\": {\n"
+ " // Let's try talking to D2. Sadly, it never talks"
+ " // to us back :( Maybe he doesn't like his name?\n"
+ " \"d2\": {\n"
+ " \"model\": \"foo\",\n"
+ " \"control-socket\": {\n"
+ "\"type\": \"unix\", \n"
+ "\"name\": \"Hector\" \n"
+ "} } } } }";
+
+ testParser(txt, ParserContext::PARSER_NETCONF, false);
+}
+
+// Tests if bash (#) comments can be placed inline, after configuration
+// content on the same line.
+TEST(ParserTest, bashCommentsInline) {
+ string txt= "{ \"Netconf\": {"
+ " \"managed-servers\": {\n"
+ " \"d2\": {\n"
+ " \"model\": \"foo\",\n"
+ " \"control-socket\": {\n"
+ "\"type\": \"unix\", # Maybe Hector is not really a \n"
+ "\"name\": \"Hector\" # Unix process?\n"
+ "# Oh no! He's a windows one and just pretending!\n"
+ "} } } } }";
+ testParser(txt, ParserContext::PARSER_NETCONF, false);
+}
+
+// Tests if multi-line C style comments are handled correctly.
+TEST(ParserTest, multilineComments) {
+ string txt= "{ \"Netconf\": {"
+ " \"managed-servers\": {\n"
+ " \"dhcp4\": {\n"
+ " \"model\": \"foo\",\n"
+ " \"control-socket\": {\n"
+ " \"type\": \"stdout\"\n"
+ " }\n"
+ " }\n"
+ " /* Ok, forget about it. If Hector doesn't want to talk,\n"
+ " we won't talk to him either. We now have quiet days. */\n"
+ " /* \"d2\": {"
+ " \"model\": \"bar\",\n"
+ " \"control-socket\": {\n"
+ " \"type\": \"unix\",\n"
+ "\"name\": \"Hector\"\n"
+ "} }*/ } } }";
+ testParser(txt, ParserContext::PARSER_NETCONF, false);
+}
+
+// Tests if embedded comments are handled correctly.
+TEST(ParserTest, embeddedComments) {
+ string txt= "{ \"Netconf\": {"
+ " \"comment\": \"a comment\","
+ " \"managed-servers\": {\n"
+ " \"dhcp4\": {\n"
+ " \"control-socket\": {\n"
+ " \"user-context\": { \"comment\": \"indirect\" },\n"
+ " \"type\": \"stdout\"\n"
+ " } } },\n"
+ " \"user-context\": { \"compatible\": true }\n"
+ "} }";
+ testParser(txt, ParserContext::PARSER_NETCONF, false);
+}
+
+/// @brief Loads the specified example config file
+///
+/// This function loads the specified example file twice: first using the
+/// legacy JSON parser and then a second time using the bison parser. The
+/// two resulting Element trees are then compared. The input is decommented
+/// before it is passed to the legacy parser (as its support for comments
+/// is very limited).
+///
+/// @param fname name of the file to be loaded
+void testFile(const std::string& fname) {
+ ElementPtr json;
+ ElementPtr reference_json;
+ ConstElementPtr test_json;
+
+ string decommented = decommentJSONfile(fname);
+
+ cout << "Parsing file " << fname << " (" << decommented << ")" << endl;
+
+ EXPECT_NO_THROW(json = Element::fromJSONFile(decommented, true));
+ reference_json = moveComments(json);
+
+ // remove the temporary file
+ EXPECT_NO_THROW(::remove(decommented.c_str()));
+
+ EXPECT_NO_THROW(
+ try {
+ ParserContext ctx;
+ test_json = ctx.parseFile(fname, ParserContext::PARSER_NETCONF);
+ } catch (const std::exception &x) {
+ cout << "EXCEPTION: " << x.what() << endl;
+ throw;
+ });
+
+ ASSERT_TRUE(reference_json);
+ ASSERT_TRUE(test_json);
+
+ compareJSON(reference_json, test_json);
+}
+
+// This test loads all available example config files. Each config is loaded
+// twice: first with the existing Element::fromJSONFile() and then
+// a second time with the NetconfParser. Both JSON trees are then compared.
+// Hopefully the list of example configs will grow over time.
+TEST(ParserTest, file) {
+ vector<string> configs;
+ configs.push_back("comments.json");
+ configs.push_back("simple.json");
+
+ for (size_t i = 0; i < configs.size(); ++i) {
+ testFile(string(CFG_EXAMPLES) + "/" + configs[i]);
+ }
+}
+
+/// @brief Tests error conditions in NetconfParser
+///
+/// @param txt text to be parsed
+/// @param parser_type type of the parser to be used in the test
+/// @param msg expected content of the exception
+void testError(const std::string& txt,
+ ParserContext::ParserType parser_type,
+ const std::string& msg)
+{
+ try {
+ ParserContext ctx;
+ ConstElementPtr parsed = ctx.parseString(txt, parser_type);
+ FAIL() << "Expected ParseError but nothing was raised (expected: "
+ << msg << ")";
+ }
+ catch (const ParseError& ex) {
+ EXPECT_EQ(msg, ex.what());
+ }
+ catch (...) {
+ FAIL() << "Expected ParseError but something else was raised";
+ }
+}
+
+// Verify that error conditions are handled correctly.
+TEST(ParserTest, errors) {
+ // no input
+ testError("", ParserContext::PARSER_JSON,
+ "<string>:1.1: syntax error, unexpected end of file");
+ testError(" ", ParserContext::PARSER_JSON,
+ "<string>:1.2: syntax error, unexpected end of file");
+ testError("\n", ParserContext::PARSER_JSON,
+ "<string>:2.1: syntax error, unexpected end of file");
+ testError("\t", ParserContext::PARSER_JSON,
+ "<string>:1.2: syntax error, unexpected end of file");
+ testError("\r", ParserContext::PARSER_JSON,
+ "<string>:1.2: syntax error, unexpected end of file");
+
+ // comments
+ testError("# nothing\n",
+ ParserContext::PARSER_JSON,
+ "<string>:2.1: syntax error, unexpected end of file");
+ testError(" #\n",
+ ParserContext::PARSER_JSON,
+ "<string>:2.1: syntax error, unexpected end of file");
+ testError("// nothing\n",
+ ParserContext::PARSER_JSON,
+ "<string>:2.1: syntax error, unexpected end of file");
+ testError("/* nothing */\n",
+ ParserContext::PARSER_JSON,
+ "<string>:2.1: syntax error, unexpected end of file");
+ testError("/* no\nthing */\n",
+ ParserContext::PARSER_JSON,
+ "<string>:3.1: syntax error, unexpected end of file");
+ testError("/* no\nthing */\n\n",
+ ParserContext::PARSER_JSON,
+ "<string>:4.1: syntax error, unexpected end of file");
+ testError("/* nothing\n",
+ ParserContext::PARSER_JSON,
+ "Comment not closed. (/* in line 1");
+ testError("\n\n\n/* nothing\n",
+ ParserContext::PARSER_JSON,
+ "Comment not closed. (/* in line 4");
+ testError("{ /* */*/ }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3-8: Invalid character: *");
+ testError("{ /* // *// }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3-11: Invalid character: /");
+ testError("{ /* // */// }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:2.1: syntax error, unexpected end of file, "
+ "expecting }");
+
+ // includes
+ testError("<?\n",
+ ParserContext::PARSER_JSON,
+ "Directive not closed.");
+ testError("<?include\n",
+ ParserContext::PARSER_JSON,
+ "Directive not closed.");
+ string file = string(CFG_EXAMPLES) + "/" + "simple.json";
+ testError("<?include \"" + file + "\"\n",
+ ParserContext::PARSER_JSON,
+ "Directive not closed.");
+ testError("<?include \"/foo/bar\" ?>\n",
+ ParserContext::PARSER_JSON,
+ "Can't open include file /foo/bar");
+
+ // JSON keywords
+ testError("{ \"foo\": True }",
+ ParserContext::PARSER_JSON,
+ "<string>:1.10-13: JSON true reserved keyword is lower case only");
+ testError("{ \"foo\": False }",
+ ParserContext::PARSER_JSON,
+ "<string>:1.10-14: JSON false reserved keyword is lower case only");
+ testError("{ \"foo\": NULL }",
+ ParserContext::PARSER_JSON,
+ "<string>:1.10-13: JSON null reserved keyword is lower case only");
+ testError("{ \"foo\": Tru }",
+ ParserContext::PARSER_JSON,
+ "<string>:1.10: Invalid character: T");
+ testError("{ \"foo\": nul }",
+ ParserContext::PARSER_JSON,
+ "<string>:1.10: Invalid character: n");
+
+ // numbers
+ testError("123",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1-3: syntax error, unexpected integer, "
+ "expecting {");
+ testError("-456",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1-4: syntax error, unexpected integer, "
+ "expecting {");
+ testError("-0001",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1-5: syntax error, unexpected integer, "
+ "expecting {");
+ testError("1234567890123456789012345678901234567890",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-40: Failed to convert "
+ "1234567890123456789012345678901234567890"
+ " to an integer.");
+ testError("-3.14e+0",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1-8: syntax error, unexpected floating point, "
+ "expecting {");
+ testError("1e50000",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-7: Failed to convert 1e50000 "
+ "to a floating point.");
+
+ // strings
+ testError("\"aabb\"",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1-6: syntax error, unexpected constant string, "
+ "expecting {");
+ testError("{ \"aabb\"err",
+ ParserContext::PARSER_JSON,
+ "<string>:1.9: Invalid character: e");
+ testError("{ err\"aabb\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3: Invalid character: e");
+ testError("\"a\n\tb\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-6: Invalid control in \"a\n\tb\"");
+ testError("\"a\\n\\tb\"",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1-8: syntax error, unexpected constant string, "
+ "expecting {");
+ testError("\"a\\x01b\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-8: Bad escape in \"a\\x01b\"");
+ testError("\"a\\u0162\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-9: Unsupported unicode escape in \"a\\u0162\"");
+ testError("\"a\\u062z\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-9: Bad escape in \"a\\u062z\"");
+ testError("\"abc\\\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1-6: Overflow escape in \"abc\\\"");
+
+ // from data_unittest.c
+ testError("\\a",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1: Invalid character: \\");
+ testError("\\",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1: Invalid character: \\");
+ testError("\\\"\\\"",
+ ParserContext::PARSER_JSON,
+ "<string>:1.1: Invalid character: \\");
+
+ // want a map
+ testError("[]\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.1: syntax error, unexpected [, "
+ "expecting {");
+ testError("{ 123 }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3-5: syntax error, unexpected integer, "
+ "expecting }");
+ testError("{ 123 }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.3-5: syntax error, unexpected integer");
+ testError("{ \"foo\" }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.9: syntax error, unexpected }, "
+ "expecting :");
+ testError("{ \"foo\" }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.9: syntax error, unexpected }, expecting :");
+ testError("{ \"foo\":null }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.3-7: got unexpected keyword "
+ "\"foo\" in toplevel map.");
+ testError("{ \"Netconf\" }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.13: syntax error, unexpected }, "
+ "expecting :");
+ testError("{ \"Netconf\":",
+ ParserContext::PARSER_NETCONF,
+ "<string>:1.13: syntax error, unexpected end of file, "
+ "expecting {");
+ testError("{}{}\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3: syntax error, unexpected {, "
+ "expecting end of file");
+
+ // bad commas
+ testError("{ , }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3: syntax error, unexpected \",\", "
+ "expecting }");
+ testError("{ , \"foo\":true }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.3: syntax error, unexpected \",\", "
+ "expecting }");
+ testError("{ \"foo\":true, }\n",
+ ParserContext::PARSER_JSON,
+ "<string>:1.15: syntax error, unexpected }, "
+ "expecting constant string");
+
+ // bad type
+ testError("{ \"Netconf\":{\n"
+ " \"managed-servers\":false }}\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:2.21-25: syntax error, unexpected boolean, "
+ "expecting {");
+
+ // unknown keyword
+ testError("{ \"Netconf\":{\n"
+ " \"topping\": \"Mozarella\" }}\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:2.2-10: got unexpected keyword "
+ "\"topping\" in Netconf map.");
+
+ // user context and embedded comments
+ testError("{ \"Netconf\":{\n"
+ " \"comment\": true } }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:2.14-17: syntax error, unexpected boolean, "
+ "expecting constant string");
+
+ testError("{ \"Netconf\":{\n"
+ " \"user-context\": \"a comment\" } }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:2.19-29: syntax error, unexpected constant string, "
+ "expecting {");
+
+ testError("{ \"Netconf\":{\n"
+ " \"comment\": \"a comment\",\n"
+ " \"comment\": \"another one\" } }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:3.3-11: duplicate user-context/comment entries "
+ "(previous at <string>:2:3)");
+
+ testError("{ \"Netconf\":{\n"
+ " \"user-context\": { \"version\": 1 },\n"
+ " \"user-context\": { \"one\": \"only\" } } }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:3.3-16: duplicate user-context entries "
+ "(previous at <string>:2:19)");
+
+ testError("{ \"Netconf\":{\n"
+ " \"user-context\": { \"comment\": \"indirect\" },\n"
+ " \"comment\": \"a comment\" } }\n",
+ ParserContext::PARSER_NETCONF,
+ "<string>:3.3-11: duplicate user-context/comment entries "
+ "(previous at <string>:2:19)");
+}
+
+// Check unicode escapes
+TEST(ParserTest, unicodeEscapes) {
+ ConstElementPtr result;
+ string json;
+
+ // check we can reread output
+ for (char c = -128; c < 127; ++c) {
+ // Use a two-character string: the second character is replaced below.
+ string ins("  ");
+ ins[1] = c;
+ ConstElementPtr e(new StringElement(ins));
+ json = e->str();
+ ASSERT_NO_THROW(
+ try {
+ ParserContext ctx;
+ result = ctx.parseString(json, ParserContext::PARSER_JSON);
+ } catch (const std::exception &x) {
+ cout << "EXCEPTION: " << x.what() << endl;
+ throw;
+ });
+ ASSERT_EQ(Element::string, result->getType());
+ EXPECT_EQ(ins, result->stringValue());
+ }
+}
+
+// This test checks that all representations of a slash are recognized properly.
+TEST(ParserTest, unicodeSlash) {
+ // check the 4 possible encodings of solidus '/'
+ ConstElementPtr result;
+ string json = "\"/\\/\\u002f\\u002F\"";
+ ASSERT_NO_THROW(
+ try {
+ ParserContext ctx;
+ result = ctx.parseString(json, ParserContext::PARSER_JSON);
+ } catch (const std::exception &x) {
+ cout << "EXCEPTION: " << x.what() << endl;
+ throw;
+ });
+ ASSERT_EQ(Element::string, result->getType());
+ EXPECT_EQ("////", result->stringValue());
+}
+
+}; // end of isc::netconf::test namespace
+}; // end of isc::netconf namespace
+}; // end of isc namespace