src/share/database/scripts/cql/Makefile
src/share/database/scripts/cql/upgrade_1.0_to_2.0.sh
src/share/database/scripts/cql/upgrade_2.0_to_3.0.sh
+ src/share/database/scripts/cql/upgrade_3.0_to_4.0.sh
src/share/database/scripts/cql/wipe_data.sh
src/share/database/scripts/mysql/Makefile
src/share/database/scripts/mysql/upgrade_1.0_to_2.0.sh
# Verify that kea-admin lease-version returns the correct version.
version=$($keaadmin lease-version cql -u $db_user -p $db_password -n $db_name)
- assert_str_eq "3.0" $version "Expected kea-admin to return %s, returned value was %s"
+ assert_str_eq "4.0" $version "Expected kea-admin to return %s, returned value was %s"
# Wipe the database.
cql_wipe
# 1433464245 corresponds to 2015-06-05 02:30:45
# 1436173267 corresponds to 2015-07-06 11:01:07
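+ # Note: the lease4 address column is a CQL int (signed 32-bit), so the
+ # negative address literals below are simply the signed view of ordinary
+ # dotted-quad addresses, e.g.:
+ #   -1073741302 == 0xC000020A == 192.0.2.10
+ #   -1073741301 == 0xC000020B == 192.0.2.11
+ #   -1073741300 == 0xC000020C == 192.0.2.12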
insert_cql="\
-INSERT INTO lease4(address, hwaddr, client_id, valid_lifetime, expire, subnet_id,\
- fqdn_fwd, fqdn_rev, hostname, state, user_context)\
- VALUES(-1073741302,textAsBlob('20'),textAsBlob('30'),40,1430694930,50,true,true,\
- 'one.example.com', 0, '');\
-INSERT INTO lease4(address, hwaddr, client_id, valid_lifetime, expire, subnet_id,\
- fqdn_fwd, fqdn_rev, hostname, state, user_context)\
- VALUES(-1073741301,NULL,textAsBlob('123'),40,1433464245,50,true,true,'', 1, '');\
-INSERT INTO lease4(address, hwaddr, client_id, valid_lifetime, expire, subnet_id,\
- fqdn_fwd, fqdn_rev, hostname, state, user_context)\
- VALUES(-1073741300,textAsBlob('22'),NULL,40,1436173267,50,true,true,\
- 'three.example.com', 2, '');"
+ INSERT INTO lease4 (address, hwaddr, client_id, valid_lifetime, expire, subnet_id, \
+ fqdn_fwd, fqdn_rev, hostname, state, user_context) \
+ VALUES (-1073741302,textAsBlob('20'),textAsBlob('30'),40,1430694930,50,true,true,'one.example.com', 0, '');\
+ INSERT INTO lease4 (address, hwaddr, client_id, valid_lifetime, expire, subnet_id, \
+ fqdn_fwd, fqdn_rev, hostname, state, user_context) \
+ VALUES (-1073741301,NULL,textAsBlob('123'),40,1433464245,50,true,true,'', 1, '');\
+ INSERT INTO lease4 (address, hwaddr, client_id, valid_lifetime, expire, subnet_id, \
+ fqdn_fwd, fqdn_rev, hostname, state, user_context) \
+ VALUES (-1073741300,textAsBlob('22'),NULL,40,1436173267,50,true,true,'three.example.com', 2, '');"
cql_execute "$insert_cql"
assert_eq 0 $? "insert into lease4 failed, expected exit code %d, actual %d"
# 1433464245 corresponds to 2015-06-05 02:30:45
# 1436173267 corresponds to 2015-07-06 11:01:07
insert_cql="\
-INSERT INTO lease6(address, duid, valid_lifetime, expire, subnet_id,\
- pref_lifetime, lease_type, iaid, prefix_len, fqdn_fwd, fqdn_rev, hostname,\
- hwaddr, hwtype, hwaddr_source, state, user_context)\
- VALUES('2001:db8::10',textAsBlob('20'),30,1430694930,40,50,1,60,70,true,true,\
- 'one.example.com',textAsBlob('80'),90,16,0,'');\
-INSERT INTO lease6(address, duid, valid_lifetime, expire, subnet_id,\
- pref_lifetime, lease_type, iaid, prefix_len, fqdn_fwd, fqdn_rev, hostname,\
- hwaddr, hwtype, hwaddr_source, state, user_context)\
- VALUES('2001:db8::11',NULL,30,1433464245,40,50,1,60,70,true,true,\
- '',textAsBlob('80'),90,1,1,'');\
-INSERT INTO lease6(address, duid, valid_lifetime, expire, subnet_id,\
- pref_lifetime, lease_type, iaid, prefix_len, fqdn_fwd, fqdn_rev, hostname,\
- hwaddr, hwtype, hwaddr_source, state, user_context)\
- VALUES('2001:db8::12',textAsBlob('21'),30,1436173267,40,50,1,60,70,true,true,\
- 'three.example.com',textAsBlob('80'),90,4,2,'');"
+ INSERT INTO lease6 (address, duid, valid_lifetime, expire, subnet_id, \
+ pref_lifetime, lease_type, iaid, prefix_len, fqdn_fwd, fqdn_rev, hostname, \
+ hwaddr, hwtype, hwaddr_source, state, user_context) \
+ VALUES ('2001:db8::10',textAsBlob('20'),30,1430694930,40,50,1,60,70,true,true, \
+ 'one.example.com',textAsBlob('80'),90,16,0,'');\
+ INSERT INTO lease6 (address, duid, valid_lifetime, expire, subnet_id, \
+ pref_lifetime, lease_type, iaid, prefix_len, fqdn_fwd, fqdn_rev, hostname, \
+ hwaddr, hwtype, hwaddr_source, state, user_context) \
+ VALUES ('2001:db8::11',NULL,30,1433464245,40,50,1,60,70,true,true, \
+ '',textAsBlob('80'),90,1,1,'');\
+ INSERT INTO lease6 (address, duid, valid_lifetime, expire, subnet_id, \
+ pref_lifetime, lease_type, iaid, prefix_len, fqdn_fwd, fqdn_rev, hostname, \
+ hwaddr, hwtype, hwaddr_source, state, user_context) \
+ VALUES ('2001:db8::12',textAsBlob('21'),30,1436173267,40,50,1,60,70,true,true, \
+ 'three.example.com',textAsBlob('80'),90,4,2,'');"
cql_execute "$insert_cql"
assert_eq 0 $? "insert into lease6 failed, expected exit code %d, actual %d"
# Check if the scripts directory exists at all.
if [ ! -d ${db_scripts_dir}/cql ]; then
- log_error "Invalid scripts directory: ${db_scripts_dir}/mysql"
+ log_error "Invalid scripts directory: ${db_scripts_dir}/cql"
exit 1
fi
cql_upgrade_schema_to_version 2.0
# Now we need to insert some hosts to "migrate" for both v4 and v6
- qry=\
-"insert into host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id)\
- values (1, 0, textAsBlob('0123456'), 0, 0, 0);\
- insert into host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id)\
- values (2, 0, textAsBlob('1123456'), 4, 0, 4);\
- insert into host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id)\
- values (3, 0, textAsBlob('2123456'), 0, 6, 6);\
- insert into host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id)\
- values (4, 0, textAsBlob('3123456'), 4, 6, 0);\
- insert into host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id)\
- values (5, 0, textAsBlob('3123456'), -1, 6, 6);"
+ qry="\
+ INSERT INTO host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id) \
+ VALUES (1, 0, textAsBlob('0123456'), 0, 0, 0);\
+ INSERT INTO host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id) \
+ VALUES (2, 0, textAsBlob('1123456'), 4, 0, 4);\
+ INSERT INTO host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id) \
+ VALUES (3, 0, textAsBlob('2123456'), 0, 6, 6);\
+ INSERT INTO host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id) \
+ VALUES (4, 0, textAsBlob('3123456'), 4, 6, 0);\
+ INSERT INTO host_reservations (id, host_identifier_type, host_identifier, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id) \
+ VALUES (5, 0, textAsBlob('3123456'), -1, 6, 6);"
cql_execute "$qry"
assert_eq 0 $? "insert hosts failed, expected exit code: %d, actual: %d"
# Fetch host_reservation data for comparison
echo "Exporting host_reservation data to $export_file ..."
- qry=\
-"select id, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id from\
- host_reservations where id in(1,2,3,4,5);"
+ qry="\
+ SELECT id, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id\
+ FROM hosts WHERE id IN (1,2,3,4,5) ALLOW FILTERING;"
- cql_execute "$qry" >$export_file
+ cql_execute "$qry" > $export_file
assert_eq 0 $? "insert hosts failed, expected exit code: %d, actual: %d"
# Compare the dump output to reference file, they should be identical.
- cmp -s $export_file $ref_file
+ cmp -s $export_file $ref_file
assert_eq 0 $? "export file does not match reference file, expected exit code %d, actual %d"
# remove the output file.
test_finish 0
}
+# Verifies that the database can be upgraded from an earlier version and
+# that all hosts and options from the old host_reservations table (schema 3.0)
+# are converted to the new schema (4.0), keyed by the new partition key, and
+# moved to the new hosts table.
+cql_upgrade_hosts_test() {
+ test_start "cql.update_hosts_test"
+
+ # Let's wipe the whole database
+ cql_wipe
+
+ # We need to create an older database with lease data so we can
+ # verify the upgrade mechanisms which convert subnet id values
+ #
+ # Initialize database to schema 1.0.
+ cql_execute_script @abs_top_srcdir@/src/bin/admin/tests/dhcpdb_create_1.0.cql
+
+ # Now upgrade to schema 3.0, the version just before global HRs
+ cql_upgrade_schema_to_version 3.0
+
+ # Now we need to insert some hosts to "migrate" for both v4 and v6
+ test_dir="@abs_top_srcdir@/src/bin/admin/tests"
+ data_file="$test_dir/data/cql.hosts_data_test.csv"
+ ref_file="$test_dir/data/cql.hosts_data_test.reference.csv"
+
+ output_dir="@abs_top_builddir@/src/bin/admin/tests"
+ export_file="$output_dir/data/cql.hosts_test.csv"
+ sorted_file="$output_dir/data/cql.hosts_test.sorted.csv"
+
+ # Load the host_reservation data that the upgrade should migrate
+ echo "Importing host_reservation data from $data_file ..."
+
+ qry="\
+ COPY host_reservations \
+ (id, host_identifier, host_identifier_type, host_ipv4_subnet_id, \
+ host_ipv6_subnet_id, host_ipv4_address, host_ipv4_next_server, \
+ host_ipv4_server_hostname, host_ipv4_boot_file_name, hostname, \
+ auth_key, user_context, host_ipv4_client_classes, \
+ host_ipv6_client_classes, reserved_ipv6_prefix_address, \
+ reserved_ipv6_prefix_length, reserved_ipv6_prefix_address_type, \
+ iaid, option_universe, option_code, option_value, \
+ option_formatted_value, option_space, option_is_persistent, \
+ option_client_class, option_subnet_id, option_user_context, \
+ option_scope_id) \
+ FROM '$data_file'"
+
+ cql_execute "$qry"
+ assert_eq 0 $? "insert hosts failed, expected exit code: %d, actual: %d"
+
+ # Ok, we have a 3.0 database with hosts and options. Let's upgrade it.
+ ${keaadmin} lease-upgrade cql -u $db_user -p $db_password -n $db_name -d $db_scripts_dir
+ ERRCODE=$?
+
+ # Upgrade should succeed
+ assert_eq 0 $ERRCODE "upgrade failed"
+
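+ # After the 3.0 -> 4.0 upgrade the reservations live in the new "hosts"
+ # table, which adds a hashed "key" partition column alongside "id".
+ # A rough manual spot check (not part of this test) could be, e.g.:
+ #
+ #   SELECT key, id, host_identifier, host_ipv4_subnet_id FROM hosts LIMIT 5;
+ #
+ # Below we export the full column set and compare it against the
+ # pre-generated reference file.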
+ qry="\
+ COPY hosts \
+ (id, key, host_identifier, host_identifier_type, host_ipv4_subnet_id, \
+ host_ipv6_subnet_id, host_ipv4_address, host_ipv4_next_server, \
+ host_ipv4_server_hostname, host_ipv4_boot_file_name, hostname, \
+ auth_key, user_context, host_ipv4_client_classes, \
+ host_ipv6_client_classes, reserved_ipv6_prefix_address, \
+ reserved_ipv6_prefix_length, reserved_ipv6_prefix_address_type, \
+ iaid, option_universe, option_code, option_value, \
+ option_formatted_value, option_space, option_is_persistent, \
+ option_client_class, option_subnet_id, option_user_context, \
+ option_scope_id) \
+ TO '$export_file'"
+
+ cql_execute "$qry"
+ assert_eq 0 $? "insert hosts failed, expected exit code: %d, actual: %d"
+
+ # sort data so we can compare
+ sort -V $export_file > $sorted_file
+
+ # Compare the dump output to reference file, they should be identical.
+ cmp -s $sorted_file $ref_file
+ assert_eq 0 $? "export file does not match reference file, expected exit code %d, actual %d"
+
+ # remove the output file.
+ rm $export_file
+
+ # remove the sorted file.
+ rm $sorted_file
+
+ # Wipe the database.
+ cql_wipe
+
+ # Report test success.
+ test_finish 0
+}
+
# Run tests.
cql_lease_init_test
cql_lease4_dump_test
cql_lease6_dump_test
cql_unused_subnet_id_test
+cql_upgrade_hosts_test
--- /dev/null
+1105657659805715115,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,17,0x,2495,dhcp6,False,,0,,3
+2129388898029710264,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,1,0x,312131,vendor-encapsulated-options,False,,0,,3\r
+2370585748436022247,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,67,0x,my-boot-file,dhcp4,True,,0,,3\r
+2459636980433777721,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,59,0x,my-boot-file,dhcp6,True,,0,{ \"comment\": \"a host reservation\" },3
+5403514763123047131,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,254,0x,192.0.2.3,dhcp4,False,,0,,3
+5501234819855171334,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,17,0x,2495,dhcp6,False,,0,,3\r
+5747907930644082856,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,1,0x,,isc2,True,,0,,3
+5955003068494074400,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,59,0x,my-boot-file,dhcp6,True,,0,,3\r
+6399013866147252670,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,32,0x,3600,dhcp6,False,,0,,3
+6783601303445960591,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,32,0x,3600,dhcp6,False,,0,,3\r
+7821756928114620236,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,23,0x,64,dhcp4,False,,0,,3
+8973539074684426388,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,1,0x,,isc,True,,0,,3\r
+9211831388923168274,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,2,0x,"10.0.0.5,10.0.0.3,10.0.3.4",isc,False,,0,,3
+-650687214220680074,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,1024,0x,2001:db8:1::1,dhcp6,False,,0,,3
+-1123502337428230752,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,254,0x,192.0.2.3,dhcp4,False,,0,,3\r
+-1210691761766369036,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,67,0x,my-boot-file,dhcp4,True,,0,{ \"comment\": \"a host reservation\" },3
+-3907456719587717375,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,2,0x,"10.0.0.5,10.0.0.3,10.0.3.4",isc,False,,0,,3\r
+-5169419091664697091,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,2,0x,"3000::1,3000::2,3000::3",isc2,False,,0,,3
+-6372549629773775261,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,1,0x,312131,vendor-encapsulated-options,False,,0,,3
+-6378104957663278797,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,1,0x,,isc,True,,0,,3
+-7724068518921719729,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,1024,0x,2001:db8:1::1,dhcp6,False,,0,,3\r
+-7804940982500935489,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,23,0x,64,dhcp4,False,,0,,3\r
+-8100784457292204371,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,1,0x,,isc2,True,,0,,3\r
+-8306770918748488616,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,2,0x,"3000::1,3000::2,3000::3",isc2,False,,0,,3\r
--- /dev/null
+1105657659805715115,5367868710821401223,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,17,0x,2495,dhcp6,False,,0,,3\r
+2129388898029710264,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,1,0x,312131,vendor-encapsulated-options,False,,0,,3\r
+2370585748436022247,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,67,0x,my-boot-file,dhcp4,True,,0,,3\r
+2459636980433777721,5367868710821401223,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,59,0x,my-boot-file,dhcp6,True,,0,{ \"comment\": \"a host reservation\" },3\r
+5403514763123047131,4012104063077014549,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,254,0x,192.0.2.3,dhcp4,False,,0,,3\r
+5501234819855171334,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,17,0x,2495,dhcp6,False,,0,,3\r
+5747907930644082856,5367868710821401223,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,1,0x,,isc2,True,,0,,3\r
+5955003068494074400,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,59,0x,my-boot-file,dhcp6,True,,0,,3\r
+6399013866147252670,5367868710821401223,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,32,0x,3600,dhcp6,False,,0,,3\r
+6783601303445960591,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,32,0x,3600,dhcp6,False,,0,,3\r
+7821756928114620236,4012104063077014549,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,23,0x,64,dhcp4,False,,0,,3\r
+8973539074684426388,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,1,0x,,isc,True,,0,,3\r
+9211831388923168274,4012104063077014549,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,2,0x,"10.0.0.5,10.0.0.3,10.0.3.4",isc,False,,0,,3\r
+-650687214220680074,5367868710821401223,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,1024,0x,2001:db8:1::1,dhcp6,False,,0,,3\r
+-1123502337428230752,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,254,0x,192.0.2.3,dhcp4,False,,0,,3\r
+-1210691761766369036,4012104063077014549,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,67,0x,my-boot-file,dhcp4,True,,0,{ \"comment\": \"a host reservation\" },3\r
+-3907456719587717375,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,2,0x,"10.0.0.5,10.0.0.3,10.0.3.4",isc,False,,0,,3\r
+-5169419091664697091,5367868710821401223,0x4142434445464748494b,1,1,101,0,0,,,,,,,,2001:db8::1,128,0,0,1,2,0x,"3000::1,3000::2,3000::3",isc2,False,,0,,3\r
+-6372549629773775261,4012104063077014549,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,1,0x,312131,vendor-encapsulated-options,False,,0,,3\r
+-6378104957663278797,4012104063077014549,0x414243444547,0,1,101,-1073741307,0,,,,,,,,::,0,-1,-1,0,1,0x,,isc,True,,0,,3\r
+-7724068518921719729,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,1024,0x,2001:db8:1::1,dhcp6,False,,0,,3\r
+-7804940982500935489,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,0,23,0x,64,dhcp4,False,,0,,3\r
+-8100784457292204371,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,1,0x,,isc2,True,,0,,3\r
+-8306770918748488616,-4885955325122621964,0x414243444548,0,2,102,0,0,,,,,,,,2001:db8::1,128,0,0,1,2,0x,"3000::1,3000::2,3000::3",isc2,False,,0,,3\r
id | host_ipv4_subnet_id | host_ipv6_subnet_id | option_subnet_id
----+---------------------+---------------------+------------------
- 1 | -1 | -1 | -1
2 | 4 | -1 | 4
- 3 | -1 | 6 | 6
4 | 4 | 6 | -1
+ 3 | -1 | 6 | 6
5 | -1 | 6 | 6
+ 1 | -1 | -1 | -1
(5 rows)
constexpr uint32_t CQL_DRIVER_VERSION_MINOR = CASS_VERSION_MINOR;
/// @}
-/// Define CQL schema version: 3.0
+/// Define CQL schema version: 4.0
/// @{
-constexpr uint32_t CQL_SCHEMA_VERSION_MAJOR = 3u;
+constexpr uint32_t CQL_SCHEMA_VERSION_MAJOR = 4u;
constexpr uint32_t CQL_SCHEMA_VERSION_MINOR = 0u;
/// @}
namespace dhcp {
/// @brief Provides mechanisms for sending and retrieving data from the
-/// host_reservations table.
+/// hosts table.
class CqlHostExchange : public virtual CqlExchange {
public:
/// @brief Constructor
/// values which uniquely determine an entry in the table. Uses FNV-1a
/// on 64 bits.
///
- /// The primary key aggregates: host_ipv4_subnet_id, host_ipv6_subnet_id,
+ /// The primary key's clustering column (id) aggregates: host_identifier,
+ /// host_identifier_type, host_ipv4_subnet_id, host_ipv6_subnet_id,
/// host_ipv4_address, reserved_ipv6_prefix_address,
/// reserved_ipv6_prefix_length, option_code, option_space.
- cass_int64_t hashIntoId() const;
+ uint64_t hashIntoId() const;
+
+ /// @brief Create unique key for storage in table key.
+ ///
+ /// The primary key's partition column (key) aggregates: host_identifier,
+ /// host_identifier_type, host_ipv4_subnet_id, host_ipv6_subnet_id,
+ /// host_ipv4_address.
+ uint64_t hashIntoKey() const;
+
+ /// @brief Create unique key string for a host.
+ ///
+ /// The primary key's partition column (key) aggregates: host_identifier,
+ /// host_identifier_type, host_ipv4_subnet_id, host_ipv6_subnet_id,
+ /// host_ipv4_address.
+ std::string hostKey() const;
/// @brief Copy received data into Host object
///
/// database)
const OptionWrapper retrieveOption() const;
- /// @brief Statement tags definitions
+ /// @brief Statement tags
/// @{
// Inserts all parameters belonging to any reservation from a single host.
- static constexpr StatementTag INSERT_HOST =
- "INSERT_HOST";
+ static constexpr StatementTag INSERT_HOST = "INSERT_HOST";
- // Retrieves hosts informations, IPv6 reservations and both IPv4 and IPv6
- // options associated with the hosts.
- static constexpr StatementTag GET_HOST =
- "GET_HOST";
+ // Retrieves all hosts' information, IPv6 reservations and both IPv4 and
+ // IPv6 options associated with them.
+ static constexpr StatementTag GET_HOST = "GET_HOST";
// Retrieves host information, IPv6 reservations and both IPv4 and IPv6
- // options associated with the host.
- static constexpr StatementTag GET_HOST_BY_HOST_ID =
- "GET_HOST_BY_HOST_ID";
+ // options associated with it.
+ static constexpr StatementTag GET_HOST_BY_HOST_ID = "GET_HOST_BY_HOST_ID";
// Retrieves host information along with the IPv4 options associated
// with it.
- static constexpr StatementTag GET_HOST_BY_IPV4_ADDRESS =
- "GET_HOST_BY_IPV4_ADDRESS";
+ static constexpr StatementTag GET_HOST_BY_IPV4_ADDRESS = "GET_HOST_BY_IPV4_ADDRESS";
// Retrieves host information and IPv4 options using subnet identifier
// and client's identifier (i.e. hardware address or DUID).
"GET_HOST_BY_IPV4_SUBNET_ID_AND_HOST_ID";
// Retrieves host information; IPv6 reservations and IPv6 options
- // associated with a host using subnet identifier and client's
+ // associated with it using subnet identifier and client's
// identifier (i.e. hardware address or DUID).
static constexpr StatementTag GET_HOST_BY_IPV6_SUBNET_ID_AND_HOST_ID =
"GET_HOST_BY_IPV6_SUBNET_ID_AND_HOST_ID";
"GET_HOST_BY_IPV4_SUBNET_ID_AND_ADDRESS";
// Retrieves host information, IPv6 reservations and IPv6 options
- // associated with a host using prefix and prefix length. This query
- // returns host information for a single host. However, multiple rows
- // are returned due to left joining IPv6 reservations and IPv6 options.
- // The number of rows returned is multiplication of number of existing
- // IPv6 reservations and IPv6 options.
- static constexpr StatementTag GET_HOST_BY_IPV6_PREFIX =
- "GET_HOST_BY_IPV6_PREFIX";
+ // associated with it using prefix and prefix length. The query returns
+ // host information for a single host. However, multiple rows are returned
+ // due to left joining IPv6 reservations and IPv6 options. The number of
+ // rows returned is the product of the number of existing IPv6
+ // reservations and IPv6 options.
+ static constexpr StatementTag GET_HOST_BY_IPV6_PREFIX = "GET_HOST_BY_IPV6_PREFIX";
// Retrieves host information and IPv6 options for the host using subnet
// identifier and IPv6 reservation.
// associated with a host using subnet identifier.
static constexpr StatementTag GET_HOST_BY_IPV6_SUBNET_ID =
"GET_HOST_BY_IPV6_SUBNET_ID";
+
+ // Retrieves host information along with the IPv4 options associated
+ // with it using a subnet identifier from first host (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV4_SUBNET_ID_LIMIT =
+ "GET_HOST_BY_IPV4_SUBNET_ID_LIMIT";
+
+ // Retrieves host information along with the IPv4 options associated
+ // with it using a subnet identifier from host (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV4_SUBNET_ID_KEY =
+ "GET_HOST_BY_IPV4_SUBNET_ID_KEY";
+
+ // Retrieves host information along with the IPv4 options associated
+ // with it using a subnet identifier from next host (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV4_SUBNET_ID_NEXT_KEY =
+ "GET_HOST_BY_IPV4_SUBNET_ID_NEXT_KEY";
+
+ // Retrieves host information along with the IPv4 options associated
+ // with it using a subnet identifier from host with a limit (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV4_SUBNET_ID_PAGE =
+ "GET_HOST_BY_IPV4_SUBNET_ID_PAGE";
+
+ // Retrieves host information; IPv6 reservations and IPv6 options
+ // associated with it using subnet identifier from first host (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV6_SUBNET_ID_LIMIT =
+ "GET_HOST_BY_IPV6_SUBNET_ID_LIMIT";
+
+ // Retrieves host information; IPv6 reservations and IPv6 options
+ // associated with it using subnet identifier from host (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV6_SUBNET_ID_KEY =
+ "GET_HOST_BY_IPV6_SUBNET_ID_KEY";
+
+ // Retrieves host information; IPv6 reservations and IPv6 options
+ // associated with it using subnet identifier from next host (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV6_SUBNET_ID_NEXT_KEY =
+ "GET_HOST_BY_IPV6_SUBNET_ID_NEXT_KEY";
+
+ // Retrieves host information; IPv6 reservations and IPv6 options
+ // associated with it using subnet identifier from host with a limit
+ // (paging).
+ static constexpr StatementTag GET_HOST_BY_IPV6_SUBNET_ID_PAGE =
+ "GET_HOST_BY_IPV6_SUBNET_ID_PAGE";
/// @}
/// @brief Cassandra statements
/// Pointer to Host object holding information being inserted into database.
HostPtr host_;
- /// @brief Primary key. Aggregates: host_identifier, host_identifier_type,
- /// reserved_ipv6_prefix_address, reserved_ipv6_prefix_length, option_code,
- /// option_space.
+ /// @brief Primary key. Partition key. Aggregates: host_identifier,
+ /// host_identifier_type, host_ipv4_subnet_id, host_ipv6_subnet_id,
+ /// host_ipv4_address.
+ cass_int64_t key_;
+
+ /// @brief Primary key. Clustering key. Aggregates: host_identifier,
+ /// host_identifier_type, reserved_ipv6_prefix_address,
+ /// reserved_ipv6_prefix_length, option_code, option_space.
cass_int64_t id_;
/// @brief Client's identifier (e.g. DUID, HW address) in binary format
constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_AND_ADDRESS;
constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID;
constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_LIMIT;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_LIMIT;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_NEXT_KEY;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_NEXT_KEY;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_KEY;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_KEY;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_PAGE;
+constexpr StatementTag CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_PAGE;
constexpr StatementTag CqlHostExchange::DELETE_HOST;
StatementMap CqlHostExchange::tagged_statements_ = {
{INSERT_HOST,
{INSERT_HOST,
- "INSERT INTO host_reservations ( "
+ "INSERT INTO hosts ( "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_user_context, "
"option_scope_id "
") VALUES ( "
+ // key
+ "?, "
// id
"?, "
// host
{GET_HOST,
{GET_HOST,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
}},
{GET_HOST_BY_HOST_ID,
{GET_HOST_BY_HOST_ID,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
"WHERE host_identifier = ? "
"AND host_identifier_type = ? "
"ALLOW FILTERING "
{GET_HOST_BY_IPV4_ADDRESS,
{GET_HOST_BY_IPV4_ADDRESS,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
"WHERE host_ipv4_address = ? "
"ALLOW FILTERING "
}},
{GET_HOST_BY_IPV4_SUBNET_ID_AND_HOST_ID,
{GET_HOST_BY_IPV4_SUBNET_ID_AND_HOST_ID,
"SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv4_subnet_id = ? "
+ "AND host_identifier = ? "
+ "AND host_identifier_type = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV6_SUBNET_ID_AND_HOST_ID,
+ {GET_HOST_BY_IPV6_SUBNET_ID_AND_HOST_ID,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv6_subnet_id = ? "
+ "AND host_identifier = ? "
+ "AND host_identifier_type = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV4_SUBNET_ID_AND_ADDRESS,
+ {GET_HOST_BY_IPV4_SUBNET_ID_AND_ADDRESS,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv4_subnet_id = ? "
+ "AND host_ipv4_address = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV6_PREFIX,
+ {GET_HOST_BY_IPV6_PREFIX,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE reserved_ipv6_prefix_address = ? "
+ "AND reserved_ipv6_prefix_length = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV6_SUBNET_ID_AND_ADDRESS,
+ {GET_HOST_BY_IPV6_SUBNET_ID_AND_ADDRESS,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv6_subnet_id = ? "
+ "AND reserved_ipv6_prefix_address = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV4_SUBNET_ID,
+ {GET_HOST_BY_IPV4_SUBNET_ID,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv4_subnet_id = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV6_SUBNET_ID,
+ {GET_HOST_BY_IPV6_SUBNET_ID,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv6_subnet_id = ? "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV4_SUBNET_ID_LIMIT,
+ {GET_HOST_BY_IPV4_SUBNET_ID_LIMIT,
+ "SELECT "
+ "key, "
+ "id, "
+ "host_identifier, "
+ "host_identifier_type, "
+ "host_ipv4_subnet_id, "
+ "host_ipv6_subnet_id, "
+ "host_ipv4_address, "
+ "host_ipv4_next_server, "
+ "host_ipv4_server_hostname, "
+ "host_ipv4_boot_file_name, "
+ "auth_key, "
+ "hostname, "
+ "user_context, "
+ "host_ipv4_client_classes, "
+ "host_ipv6_client_classes, "
+ "reserved_ipv6_prefix_address, "
+ "reserved_ipv6_prefix_length, "
+ "reserved_ipv6_prefix_address_type, "
+ "iaid, "
+ "option_universe, "
+ "option_code, "
+ "option_value, "
+ "option_formatted_value, "
+ "option_space, "
+ "option_is_persistent, "
+ "option_client_class, "
+ "option_subnet_id, "
+ "option_user_context, "
+ "option_scope_id "
+ "FROM hosts "
+ "WHERE host_ipv4_subnet_id = ? "
+ "LIMIT 1 "
+ "ALLOW FILTERING "
+ }},
+
+ {GET_HOST_BY_IPV4_SUBNET_ID_NEXT_KEY,
+ {GET_HOST_BY_IPV4_SUBNET_ID_NEXT_KEY,
+ "SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
"WHERE host_ipv4_subnet_id = ? "
- "AND host_identifier = ? "
- "AND host_identifier_type = ? "
+ "AND TOKEN(key) > TOKEN(?) "
+ "LIMIT 1 "
"ALLOW FILTERING "
}},
- {GET_HOST_BY_IPV6_SUBNET_ID_AND_HOST_ID,
- {GET_HOST_BY_IPV6_SUBNET_ID_AND_HOST_ID,
+ {GET_HOST_BY_IPV4_SUBNET_ID_KEY,
+ {GET_HOST_BY_IPV4_SUBNET_ID_KEY,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
- "WHERE host_ipv6_subnet_id = ? "
- "AND host_identifier = ? "
- "AND host_identifier_type = ? "
+ "FROM hosts "
+ "WHERE key = ? "
"ALLOW FILTERING "
}},
- {GET_HOST_BY_IPV4_SUBNET_ID_AND_ADDRESS,
- {GET_HOST_BY_IPV4_SUBNET_ID_AND_ADDRESS,
+ {GET_HOST_BY_IPV4_SUBNET_ID_PAGE,
+ {GET_HOST_BY_IPV4_SUBNET_ID_PAGE,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
"WHERE host_ipv4_subnet_id = ? "
- "AND host_ipv4_address = ? "
+ "AND id = ? "
+ "LIMIT 1 "
"ALLOW FILTERING "
}},
- {GET_HOST_BY_IPV6_PREFIX,
- {GET_HOST_BY_IPV6_PREFIX,
+ {GET_HOST_BY_IPV6_SUBNET_ID_LIMIT,
+ {GET_HOST_BY_IPV6_SUBNET_ID_LIMIT,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
- "WHERE reserved_ipv6_prefix_address = ? "
- "AND reserved_ipv6_prefix_length = ? "
+ "FROM hosts "
+ "WHERE host_ipv6_subnet_id = ? "
+ "LIMIT 1 "
"ALLOW FILTERING "
}},
- {GET_HOST_BY_IPV6_SUBNET_ID_AND_ADDRESS,
- {GET_HOST_BY_IPV6_SUBNET_ID_AND_ADDRESS,
+ {GET_HOST_BY_IPV6_SUBNET_ID_NEXT_KEY,
+ {GET_HOST_BY_IPV6_SUBNET_ID_NEXT_KEY,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
"WHERE host_ipv6_subnet_id = ? "
- "AND reserved_ipv6_prefix_address = ? "
+ "AND TOKEN(key) > TOKEN(?) "
+ "LIMIT 1 "
"ALLOW FILTERING "
}},
- {GET_HOST_BY_IPV4_SUBNET_ID,
- {GET_HOST_BY_IPV4_SUBNET_ID,
+ {GET_HOST_BY_IPV6_SUBNET_ID_KEY,
+ {GET_HOST_BY_IPV6_SUBNET_ID_KEY,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
- "WHERE host_ipv4_subnet_id = ? "
+ "FROM hosts "
+ "WHERE key = ? "
"ALLOW FILTERING "
}},
- {GET_HOST_BY_IPV6_SUBNET_ID,
- {GET_HOST_BY_IPV6_SUBNET_ID,
+ {GET_HOST_BY_IPV6_SUBNET_ID_PAGE,
+ {GET_HOST_BY_IPV6_SUBNET_ID_PAGE,
"SELECT "
+ "key, "
"id, "
"host_identifier, "
"host_identifier_type, "
"option_subnet_id, "
"option_user_context, "
"option_scope_id "
- "FROM host_reservations "
+ "FROM hosts "
"WHERE host_ipv6_subnet_id = ? "
+ "AND id = ? "
+ "LIMIT 1 "
"ALLOW FILTERING "
}},
{DELETE_HOST,
{DELETE_HOST,
- "DELETE FROM host_reservations WHERE id = ? "
+ "DELETE FROM hosts WHERE key = ? AND id = ? "
"IF EXISTS "
}}
};
// Start with a fresh array.
data.clear();
- // id: blob
+ // key: bigint
+ data.add(&key_);
+ // id: bigint
data.add(&id_);
// host_identifier: blob
data.add(&host_identifier_);
}
// id: bigint
- id_ = hashIntoId();
+ id_ = static_cast<cass_int64_t>(hashIntoId());
+
+ // key: bigint
+ key_ = static_cast<cass_int64_t>(hashIntoKey());
} catch (const Exception& ex) {
isc_throw(DbOperationError,
"CqlHostExchange::prepareExchange(): "
data.clear();
if (statement_tag == CqlHostExchange::INSERT_HOST) {
+ data.add(&key_);
data.add(&id_);
data.add(&host_identifier_);
data.add(&host_identifier_type_);
data.clear();
if (statement_tag == CqlHostExchange::DELETE_HOST) {
+ data.add(&key_);
data.add(&id_);
}
}
}
-cass_int64_t
+uint64_t
CqlHostExchange::hashIntoId() const {
// Add a separator between aggregated fields to avoid collisions
// between distinct entries.
+ // Get key.
+ std::stringstream key_stream;
+ key_stream << hostKey();
+ key_stream << std::setw(V6ADDRESS_TEXT_MAX_LEN) << std::setfill('-')
+ << reserved_ipv6_prefix_address_;
+ key_stream << std::setw(4) << std::setfill('-')
+ << reserved_ipv6_prefix_length_;
+ key_stream << std::setw(4) << std::setfill('-') << option_code_;
+ key_stream << std::setw(OPTION_SPACE_MAX_LENGTH) << std::setfill('-')
+ << option_space_;
+ const std::string key = key_stream.str();
+
+ return (Hash64::hash(key));
+}
+
+uint64_t
+CqlHostExchange::hashIntoKey() const {
+ const std::string key = hostKey();
+
+ return (Hash64::hash(key));
+}
+
+std::string
+CqlHostExchange::hostKey() const {
+ // Add a separator between aggregated fields to avoid collisions
+ // between distinct entries.
// Get key.
std::stringstream key_stream;
if (host_ipv4_address_) {
key_stream << std::setw(10) << std::setfill('-') << host_ipv6_subnet_id_;
key_stream << std::setw(V4ADDRESS_TEXT_MAX_LEN) << std::setfill('-')
<< host_ipv4_address_;
- key_stream << std::setw(V6ADDRESS_TEXT_MAX_LEN) << std::setfill('-')
- << reserved_ipv6_prefix_address_;
- key_stream << std::setw(4) << std::setfill('-')
- << reserved_ipv6_prefix_length_;
- key_stream << std::setw(4) << std::setfill('-') << option_code_;
- key_stream << std::setw(OPTION_SPACE_MAX_LENGTH) << std::setfill('-')
- << option_space_;
- const std::string key = key_stream.str();
-
- const cass_int64_t hash = static_cast<cass_int64_t>(Hash64::hash(key));
-
- return (hash);
+ return key_stream.str();
}
boost::any
if (!option_definition_ptr) {
// If no definition found, we use generic option type.
OptionBuffer option_buffer(option_value_.begin(), option_value_.end());
- option.reset(new Option(static_cast<Option::Universe>(option_universe_),
- static_cast<uint16_t>(option_code_),
- option_buffer.begin(), option_buffer.end()));
+ option = boost::make_shared<Option>(static_cast<Option::Universe>(option_universe_),
+ static_cast<uint16_t>(option_code_),
+ option_buffer.begin(), option_buffer.end());
} else {
// The option value may be specified in textual or binary format
// in the
}
}
- OptionWrapper result(OptionDescriptorPtr(new OptionDescriptor(option, option_is_persistent_,
- option_formatted_value_)), option_space_);
+ OptionWrapper result(boost::make_shared<OptionDescriptor>(option, option_is_persistent_,
+ option_formatted_value_),
+ option_space_);
// Set the user context if there is one into the option descriptor.
if (!option_user_context_.empty()) {
try {
/// @brief Implementation of @ref CqlHostDataSource::getPage4()
///
- /// Not implemented.
- /// @todo: implement it (#427).
- ///
/// See @ref CqlHostDataSource::getPage4() for parameter details.
///
/// @param subnet_id identifier of the subnet to which hosts belong
/// @brief Implementation of @ref CqlHostDataSource::getPage6()
///
- /// Not implemented.
- /// @todo: implement it (#427).
- ///
/// See @ref CqlHostDataSource::getPage6() for parameter details.
///
/// @param subnet_id identifier of the subnet to which hosts belong
virtual ConstHostCollection getHostCollection(StatementTag statement_tag,
AnyArray& where_values) const;
+ /// @brief Retrieves a page of hosts.
+ ///
+ /// @param subnet_id identifier of the subnet to which hosts belong
+ /// @param lower_host_id Host identifier used as lower bound for the
+ /// returned range.
+ /// @param count the size of the page
+ ///
+ /// @return a collection of hosts containing one or more hosts
+ virtual ConstHostCollection getHostCollectionPage4(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ size_t count = 0) const;
+
+ /// @brief Retrieves a page of hosts.
+ ///
+ /// @param subnet_id identifier of the subnet to which hosts belong
+ /// @param lower_host_id Host identifier used as lower bound for the
+ /// returned range.
+ /// @param count the size of the page
+ ///
+ /// @return a collection of hosts containing one or more hosts
+ virtual ConstHostCollection getHostCollectionPage6(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ size_t count = 0) const;
+
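+ // A page walk boils down to the following loop (a sketch mirroring
+ // getHostCollectionPage4(), shown only for orientation):
+ //
+ //     ConstHostCollection result;
+ //     uint64_t key;
+ //     for (; count; --count) {
+ //         if (!getHostKey4(subnet_id, lower_host_id, key)) {
+ //             break;
+ //         }
+ //         ConstHostPtr host = getHostByKey4(key);
+ //         result.push_back(host);
+ //         lower_host_id = host->getHostId();
+ //     }
+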
+ /// @brief Retrieves a host by key.
+ ///
+ /// @param key identifier of the host
+ ///
+ /// @return a host for the specific key
+ virtual ConstHostPtr getHostByKey4(uint64_t key) const;
+
+ /// @brief Retrieves a host by key.
+ ///
+ /// @param key identifier of the host
+ ///
+ /// @return a host for the specific key
+ virtual ConstHostPtr getHostByKey6(uint64_t key) const;
+
+ /// @brief Retrieves a valid host key.
+ ///
+ /// If lower_host_id is 0, the key parameter is updated with the key of
+ /// the first host; if lower_host_id is not 0, it is updated with the
+ /// next valid host key.
+ ///
+ /// @param subnet_id identifier of the subnet to which hosts belong
+ /// @param lower_host_id Host identifier used as lower bound for the
+ /// returned range.
+ /// @param key identifier of the host which will be updated
+ ///
+ /// @return true if there is such a host
+ virtual bool getHostKey4(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ uint64_t& key) const;
+
+ /// @brief Retrieves a valid host key.
+ ///
+ /// If lower_host_id is 0, the key parameter is updated with the key of
+ /// the first host; if lower_host_id is not 0, it is updated with the
+ /// next valid host key.
+ ///
+ /// @param subnet_id identifier of the subnet to which hosts belong
+ /// @param lower_host_id Host identifier used as lower bound for the
+ /// returned range.
+ /// @param key identifier of the host which will be updated
+ ///
+ /// @return true if there is such a host
+ virtual bool getHostKey6(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ uint64_t& key) const;
+
+ /// @brief Retrieves next valid host key.
+ ///
+ /// @param subnet_id identifier of the subnet to which hosts belong
+ /// @param key identifier of the host which will be updated with the next
+ /// valid host key
+ ///
+ /// @return true if there is such a host
+ virtual bool getNextHostKey4(const SubnetID& subnet_id,
+ uint64_t& key) const;
+
+ /// @brief Retrieves next valid host key.
+ ///
+ /// @param subnet_id identifier of the subnet to which hosts belong
+ /// @param key identifier of the host which will be updated with the next
+ /// valid host key
+ ///
+ /// @return true if there is such a host
+ virtual bool getNextHostKey6(const SubnetID& subnet_id,
+ uint64_t& key) const;
+
/// @brief Inserts or deletes a single host.
///
/// All information is available here. Calls @ref
if (code_version != db_version) {
isc_throw(DbOpenError, "Cassandra schema version mismatch: need version: "
<< code_version.first << "." << code_version.second
- << " found version: " << db_version.first << "."
+ << " found version: " << db_version.first << "."
<< db_version.second);
}
// Get host id.
host = getHost(CqlHostExchange::GET_HOST_BY_IPV6_PREFIX, where_values);
- if (host == ConstHostPtr()) {
- return (ConstHostPtr());
+ if (!host) {
+ return ConstHostPtr();
}
// Get host.
// paging at the API level.
ConstHostCollection
-CqlHostDataSourceImpl::getPage4(const SubnetID& /*subnet_id*/,
- uint64_t /*lower_host_id*/,
- const HostPageSize& /*page_size*/) const {
- isc_throw(NotImplemented,
- "reservation-get-page is not supported by Cassandra");
+CqlHostDataSourceImpl::getPage4(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ const HostPageSize& page_size) const {
+ // Run statement.
+ ConstHostCollection result =
+ getHostCollectionPage4(subnet_id, lower_host_id, page_size.page_size_);
+
+ return (result);
}
ConstHostCollection
-CqlHostDataSourceImpl::getPage6(const SubnetID& /*subnet_id*/,
- uint64_t /*lower_host_id*/,
- const HostPageSize& /*page_size*/) const {
- isc_throw(NotImplemented,
- "reservation-get-page is not supported by Cassandra");
+CqlHostDataSourceImpl::getPage6(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ const HostPageSize& page_size) const {
+ // Run statement.
+ ConstHostCollection result =
+ getHostCollectionPage6(subnet_id, lower_host_id, page_size.page_size_);
+
+ return (result);
}
ConstHostCollection
ConstHostCollection
CqlHostDataSourceImpl::getHostCollection(StatementTag statement_tag,
AnyArray& where_values) const {
-
// Run statement.
std::unique_ptr<CqlHostExchange> host_exchange(new CqlHostExchange());
AnyArray collection = host_exchange->executeSelect(dbconn_, where_values,
statement_tag, false);
- // Form HostPtr objects.
+ // Create HostPtr objects.
HostCollection host_collection;
for (boost::any& host : collection) {
host_collection.push_back(HostPtr(boost::any_cast<Host*>(host)));
}
- // Merge the denormalized table entries that belong to the same host
- // into a
- // single host.
+ // Merge the denormalized table entries that belong to the same host into a single host.
HostMap map;
for (HostPtr& host : host_collection) {
-
HostKey key = HostKey(host->getIdentifier(), host->getIdentifierType(),
host->getIPv4SubnetID(), host->getIPv6SubnetID(),
host->getIPv4Reservation());
}
ConstHostCollection result_collection;
- for (HostPair pair : map) {
- result_collection.push_back(pair.second);
+
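+ // Walk the original collection again and emit each merged host exactly
+ // once, in retrieval order; the map entry is dropped after it is emitted.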
+ for (HostPtr& host : host_collection) {
+ HostKey key = HostKey(host->getIdentifier(), host->getIdentifierType(),
+ host->getIPv4SubnetID(), host->getIPv6SubnetID(),
+ host->getIPv4Reservation());
+ if (map.find(key) != map.end()) {
+ result_collection.push_back(map[key]);
+ map.erase(key);
+ }
+ }
+ return (result_collection);
+}
+
+ConstHostPtr
+CqlHostDataSourceImpl::getHostByKey4(uint64_t key) const {
+ // Bind to array.
+ AnyArray where_values;
+ cass_int64_t key_data = static_cast<cass_int64_t>(key);
+ where_values.add(&key_data);
+
+ // Run statement.
+ ConstHostCollection collection =
+ getHostCollection(CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_KEY,
+ where_values);
+
+ if (collection.empty()) {
+ return (ConstHostPtr());
+ }
+
+ if (collection.size() >= 2u) {
+ isc_throw(MultipleRecords, "CqlHostDataSourceImpl::getHost(): multiple records were "
+ "found in the database where only one was expected for statement "
+ << CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_KEY);
+ }
+
+ return (*collection.begin());
+}
+
+ConstHostPtr
+CqlHostDataSourceImpl::getHostByKey6(uint64_t key) const {
+ // Bind to array.
+ AnyArray where_values;
+ cass_int64_t key_data = static_cast<cass_int64_t>(key);
+ where_values.add(&key_data);
+
+ // Run statement.
+ ConstHostCollection collection =
+ getHostCollection(CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_KEY,
+ where_values);
+
+ if (collection.empty()) {
+ return (ConstHostPtr());
+ }
+
+ if (collection.size() >= 2u) {
+ isc_throw(MultipleRecords, "CqlHostDataSourceImpl::getHost(): multiple records were "
+ "found in the database where only one was expected for statement "
+ << CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_KEY);
+ }
+
+ return (*collection.begin());
+}
+
+ConstHostCollection
+CqlHostDataSourceImpl::getHostCollectionPage4(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ size_t count) const {
+ ConstHostCollection result_collection;
+ for (; count; count--) {
+ uint64_t key;
+ bool valid_key = getHostKey4(subnet_id, lower_host_id, key);
+
+ if (!valid_key) {
+ break;
+ }
+
+ ConstHostPtr host = getHostByKey4(key);
+ result_collection.push_back(host);
+ lower_host_id = host->getHostId();
+ }
+
+ return (result_collection);
+}
+
+ConstHostCollection
+CqlHostDataSourceImpl::getHostCollectionPage6(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ size_t count) const {
+ ConstHostCollection result_collection;
+ for (; count; count--) {
+ uint64_t key;
+ bool valid_key = getHostKey6(subnet_id, lower_host_id, key);
+
+ if (!valid_key) {
+ break;
+ }
+
+ ConstHostPtr host = getHostByKey6(key);
+ result_collection.push_back(host);
+ lower_host_id = host->getHostId();
}
+
return (result_collection);
}
+bool
+CqlHostDataSourceImpl::getHostKey4(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ uint64_t& key) const {
+ // Convert to CQL data types.
+ cass_int32_t host_subnet_id = static_cast<cass_int32_t>(subnet_id);
+
+ // Bind to array.
+ AnyArray where_values;
+ where_values.add(&host_subnet_id);
+
+ cass_int64_t host_data = 0;
+ if (lower_host_id) {
+ host_data = static_cast<cass_int64_t>(lower_host_id);
+ where_values.add(&host_data);
+ }
+
+ // Run statement.
+ // This retrieves the first row of the first host (lower_host_id == 0)
+ // or the first row of the given host (lower_host_id != 0).
+ std::unique_ptr<CqlHostExchange> host_exchange(new CqlHostExchange());
+ AnyArray collection;
+ if (lower_host_id) {
+ collection = host_exchange->executeSelect(dbconn_, where_values,
+ CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_PAGE, false);
+ } else {
+ collection = host_exchange->executeSelect(dbconn_, where_values,
+ CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_LIMIT, false);
+ }
+
+ // Create HostPtr objects.
+ HostCollection host_collection;
+ for (boost::any& host : collection) {
+ host_collection.push_back(HostPtr(boost::any_cast<Host*>(host)));
+ }
+
+ // If there is no host, just exit
+ if (host_collection.empty()) {
+ return false;
+ }
+
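+ // The exchange members still hold the column values of the single row
+ // just fetched, so hashing them yields that host's partition key.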
+ key = host_exchange->hashIntoKey();
+
+ if (lower_host_id) {
+ return getNextHostKey4(subnet_id, key);
+ }
+
+ return true;
+}
+
+bool
+CqlHostDataSourceImpl::getHostKey6(const SubnetID& subnet_id,
+ uint64_t lower_host_id,
+ uint64_t& key) const {
+ // Convert to CQL data types.
+ cass_int32_t host_subnet_id = static_cast<cass_int32_t>(subnet_id);
+
+ // Bind to array.
+ AnyArray where_values;
+ where_values.add(&host_subnet_id);
+
+ cass_int64_t host_data = 0;
+ if (lower_host_id) {
+ host_data = static_cast<cass_int64_t>(lower_host_id);
+ where_values.add(&host_data);
+ }
+
+ // Run statement.
+ // This retrieves the first row of the first host (lower_host_id == 0)
+ // or the first row of the given host (lower_host_id != 0).
+ std::unique_ptr<CqlHostExchange> host_exchange(new CqlHostExchange());
+ AnyArray collection;
+ if (lower_host_id) {
+ collection = host_exchange->executeSelect(dbconn_, where_values,
+ CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_PAGE, false);
+ } else {
+ collection = host_exchange->executeSelect(dbconn_, where_values,
+ CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_LIMIT, false);
+ }
+
+ // Create HostPtr objects.
+ HostCollection host_collection;
+ for (boost::any& host : collection) {
+ host_collection.push_back(HostPtr(boost::any_cast<Host*>(host)));
+ }
+
+ // If there is no host, just exit
+ if (host_collection.empty()) {
+ return false;
+ }
+
+ key = host_exchange->hashIntoKey();
+
+ if (lower_host_id) {
+ return getNextHostKey6(subnet_id, key);
+ }
+
+ return true;
+}
+
+bool
+CqlHostDataSourceImpl::getNextHostKey4(const SubnetID& subnet_id,
+ uint64_t& key) const {
+ // Convert to CQL data types.
+ cass_int32_t host_subnet_id = static_cast<cass_int32_t>(subnet_id);
+ cass_int64_t key_data = static_cast<cass_int64_t>(key);
+
+ // Bind to array.
+ AnyArray where_values;
+ where_values.add(&host_subnet_id);
+ where_values.add(&key_data);
+
+ // This retrieves the first row of the next host (the one after the given key).
+ std::unique_ptr<CqlHostExchange> host_exchange(new CqlHostExchange());
+ AnyArray collection = host_exchange->executeSelect(dbconn_, where_values,
+ CqlHostExchange::GET_HOST_BY_IPV4_SUBNET_ID_NEXT_KEY, false);
+
+ // Create HostPtr objects.
+ HostCollection host_collection;
+ for (boost::any& host : collection) {
+ host_collection.push_back(HostPtr(boost::any_cast<Host*>(host)));
+ }
+
+ if (host_collection.empty()) {
+ return false;
+ }
+
+ key = host_exchange->hashIntoKey();
+ return true;
+}
+
+bool
+CqlHostDataSourceImpl::getNextHostKey6(const SubnetID& subnet_id,
+ uint64_t& key) const {
+ // Convert to CQL data types.
+ cass_int32_t host_subnet_id = static_cast<cass_int32_t>(subnet_id);
+ cass_int64_t key_data = static_cast<cass_int64_t>(key);
+
+ // Bind to array.
+ AnyArray where_values;
+ where_values.add(&host_subnet_id);
+ where_values.add(&key_data);
+
+ // This retrieves the first row of the next host (the one after the given key).
+ std::unique_ptr<CqlHostExchange> host_exchange(new CqlHostExchange());
+ AnyArray collection = host_exchange->executeSelect(dbconn_, where_values,
+ CqlHostExchange::GET_HOST_BY_IPV6_SUBNET_ID_NEXT_KEY, false);
+
+ // Create HostPtr objects.
+ HostCollection host_collection;
+ for (boost::any& host : collection) {
+ host_collection.push_back(HostPtr(boost::any_cast<Host*>(host)));
+ }
+
+ if (host_collection.empty()) {
+ return false;
+ }
+
+ key = host_exchange->hashIntoKey();
+ return true;
+}
+
bool
CqlHostDataSourceImpl::insertOrDeleteHost(bool insert,
const HostPtr& host,
host_exchange->createBindForMutation(host, subnet_id, reservation, option_space,
option_descriptor, CqlHostExchange::INSERT_HOST, assigned_values);
-
host_exchange->executeMutation(dbconn_, assigned_values, CqlHostExchange::INSERT_HOST);
} else {
host_exchange->createBindForDelete(host, subnet_id, reservation, option_space,
EXPECT_FALSE(ctx.old_lease_);
}
-
// This test checks the behavior of the allocation engine in the following
// scenario:
// - Client has no lease in the database.
bool testStatistics(const std::string& stat_name, const int64_t exp_value,
const SubnetID subnet_id) {
try {
- std::string name = (subnet_id == SUBNET_ID_UNUSED ? stat_name :
+ std::string name = (subnet_id == SUBNET_ID_UNUSED ? stat_name :
StatsMgr::generateName("subnet", subnet_id, stat_name));
ObservationPtr observation = StatsMgr::instance().getObservation(name);
if (observation) {
EXPECT_EQ(lease->subnet_id_, subnet_->getID());
if (expected_in_subnet) {
- EXPECT_TRUE(subnet_->inRange(lease->addr_))
+ EXPECT_TRUE(subnet_->inRange(lease->addr_))
<< " address: " << lease->addr_.toText();
} else {
EXPECT_FALSE(subnet_->inRange(lease->addr_))
EXPECT_NO_THROW(cfg4.use(AF_INET, "eth1/192.0.2.3"));
std::string comment = "{ \"comment\": \"foo\", \"bar\": 1 }";
EXPECT_NO_THROW(cfg4.setContext(Element::fromJSON(comment)));
-
+
// Check unparse
std::string expected =
"{ \"comment\": \"foo\", "
#include <config.h>
#include <asiolink/io_address.h>
+#include <dhcpsrv/tests/test_utils.h>
#include <exceptions/exceptions.h>
-#include <cql/cql_connection.h>
-#include <cql/testutils/cql_schema.h>
#include <dhcpsrv/host.h>
-#include <dhcpsrv/host_mgr.h>
-#include <dhcpsrv/host_data_source_factory.h>
-#include <dhcpsrv/cql_lease_mgr.h>
#include <dhcpsrv/cql_host_data_source.h>
#include <dhcpsrv/testutils/generic_host_data_source_unittest.h>
#include <dhcpsrv/testutils/host_data_source_utils.h>
-#include <dhcpsrv/tests/test_utils.h>
+#include <dhcpsrv/host_mgr.h>
+#include <dhcpsrv/host_data_source_factory.h>
+#include <cql/cql_connection.h>
+#include <cql/cql_exchange.h>
+#include <cql/testutils/cql_schema.h>
#include <gtest/gtest.h>
// CQL specifies the timeout values in ms, not seconds. Therefore
// we need to add extra 000 to the "connect-timeout=10" string.
string connection_string = validCqlConnectionString() + string(" ") +
- string(VALID_TIMEOUT) + string("000");
+ string(VALID_TIMEOUT) + string("000");
HostMgr::create();
EXPECT_NO_THROW(HostMgr::addBackend(connection_string));
HostMgr::delBackend("cql");
testReadOnlyDatabase(CQL_VALID_TYPE);
}
+// Test verifies if a host reservation can be added and later retrieved by IPv4
+// address. Host uses hw address as identifier.
+TEST_F(CqlHostDataSourceTest, basic4HWAddr) {
+ testBasic4(Host::IDENT_HWADDR);
+}
+
+// Verifies that an IPv4 host reservation with options can have the global
+// subnet id value
+TEST_F(CqlHostDataSourceTest, globalSubnetId4) {
+ testGlobalSubnetId4();
+}
+
+// Verifies that an IPv6 host reservation with options can have the global
+// subnet id value
+TEST_F(CqlHostDataSourceTest, globalSubnetId6) {
+ testGlobalSubnetId6();
+}
+
+// Verifies that an IPv4 host reservation with options can have a max value
+// for the DHCPv4 subnet id
+TEST_F(CqlHostDataSourceTest, maxSubnetId4) {
+ testMaxSubnetId4();
+}
+
+// Verifies that an IPv6 host reservation with options can have a max value
+// for the DHCPv6 subnet id
+TEST_F(CqlHostDataSourceTest, maxSubnetId6) {
+ testMaxSubnetId6();
+}
+
// Verifies that IPv4 host reservations in the same subnet can be retrieved
TEST_F(CqlHostDataSourceTest, getAll4BySubnet) {
testGetAll4();
// Verifies that IPv4 host reservations in the same subnet can be retrieved
// by pages.
-// Does not work because TOKEN(id) order is not the same than id...
-TEST_F(CqlHostDataSourceTest, DISABLED_getPage4) {
+TEST_F(CqlHostDataSourceTest, getPage4) {
testGetPage4();
}
// Verifies that IPv6 host reservations in the same subnet can be retrieved
// by pages.
-TEST_F(CqlHostDataSourceTest, DISABLED_getPage6) {
+TEST_F(CqlHostDataSourceTest, getPage6) {
testGetPage6();
}
-// Test verifies if a host reservation can be added and later retrieved by IPv4
-// address. Host uses hw address as identifier.
-TEST_F(CqlHostDataSourceTest, basic4HWAddr) {
- testBasic4(Host::IDENT_HWADDR);
+// Verifies that IPv4 host reservations in the same subnet can be retrieved
+// by pages without truncation from the limit.
+TEST_F(CqlHostDataSourceTest, getPageLimit4) {
+ testGetPageLimit4(Host::IDENT_DUID);
+}
+
+// Verifies that IPv6 host reservations in the same subnet can be retrieved
+// by pages without truncation from the limit.
+TEST_F(CqlHostDataSourceTest, getPageLimit6) {
+ testGetPageLimit6(Host::IDENT_HWADDR);
+}
+
+// Verifies that IPv4 host reservations in the same subnet can be retrieved
+// by pages even with multiple subnets.
+TEST_F(CqlHostDataSourceTest, getPage4Subnets) {
+ testGetPage4Subnets();
+}
+
+// Verifies that IPv6 host reservations in the same subnet can be retrieved
+// by pages even with multiple subnets.
+TEST_F(CqlHostDataSourceTest, getPage6Subnets) {
+ testGetPage6Subnets();
}
// Test verifies if a host reservation can be added and later retrieved by IPv4
params["name"] = "keatest";
params["user"] = "keatest";
params["password"] = "keatest";
- CqlConnection connection(params);
- ASSERT_NO_THROW(connection.openDatabase());
+ CqlConnection conn(params);
+ ASSERT_NO_THROW(conn.openDatabase());
- // Drop every table so we make sure host_reservations doesn't exist anymore.
+ // Drop every table to make sure the hosts table doesn't exist anymore.
destroyCqlSchema(false, true);
// Create a host with a reservation.
HostPtr host = HostDataSourceUtils::initializeHost6("2001:db8:1::1",
- Host::IDENT_HWADDR, false, "key##1");
+ Host::IDENT_HWADDR, false, "randomKey");
// Let's assign some DHCPv4 subnet to the host, because we will use the
// DHCPv4 subnet to try to retrieve the host after failed insertion.
host->setIPv4SubnetID(SubnetID(4));
testMultipleHosts6();
}
-// Verifies that IPv4 host reservation with options can have a the global
-// subnet id value
-TEST_F(CqlHostDataSourceTest, globalSubnetId4) {
- testGlobalSubnetId4();
-}
-
-// Verifies that IPv6 host reservation with options can have a the global
-// subnet id value
-TEST_F(CqlHostDataSourceTest, globalSubnetId6) {
- testGlobalSubnetId6();
-}
-
-// Verifies that IPv4 host reservation with options can have a max value
-// for dhcp4_subnet id
-TEST_F(CqlHostDataSourceTest, maxSubnetId4) {
- testMaxSubnetId4();
-}
-
-// Verifies that IPv6 host reservation with options can have a max value
-// for dhcp6_subnet id
-TEST_F(CqlHostDataSourceTest, maxSubnetId6) {
- testMaxSubnetId6();
-}
-
-
-
} // namespace
}
// This is the CQL implementation for
- // GenericLeaseMgrTest::testGetExpiredLeases4().
+ // GenericLeaseMgrTest::testGetExpiredLeases6().
// The GenericLeaseMgrTest implementation checks for the order of expired
// leases to be from the most expired to the least expired. Cassandra
// doesn't support ORDER BY without imposing a EQ / IN restriction on the
}
// Retrieve expired leases again. The limit of 0 means return all
- // expired
- // leases.
+ // expired leases.
ASSERT_NO_THROW(lmptr_->getExpiredLeases6(expired_leases, 0));
// The same leases should be returned.
/// Adds 3 lease and verifies fetch by DUID.
/// Verifies retrival of non existant DUID fails
TEST_F(CqlLeaseMgrTest, getLeases6Duid) {
- testGetLeases6Duid();
+ testGetLeases6Duid();
}
/// @brief Check GetLease6 methods - access by DUID/IAID/SubnetID
testNullDuid();
}
-/// @brief Tests whether memfile can store and retrieve hardware addresses
+/// @brief Tests whether CQL can store and retrieve hardware addresses
TEST_F(CqlLeaseMgrTest, testLease6Mac) {
testLease6MAC();
}
-/// @brief Tests whether memfile can store and retrieve hardware addresses
+/// @brief Tests whether CQL can store and retrieve hardware addresses
TEST_F(CqlLeaseMgrTest, testLease6HWTypeAndSource) {
testLease6HWTypeAndSource();
}
testRecountLeaseStats6();
}
-// @brief Tests that leases from specific subnet can be removed.
+/// @brief Tests that leases from specific subnet can be removed.
/// @todo: uncomment this once lease wipe is implemented
/// for Cassandra (see #5485)
TEST_F(CqlLeaseMgrTest, DISABLED_wipeLeases4) {
testWipeLeases4();
}
-// @brief Tests that leases from specific subnet can be removed.
+/// @brief Tests that leases from specific subnet can be removed.
/// @todo: uncomment this once lease wipe is implemented
/// for Cassandra (see #5485)
TEST_F(CqlLeaseMgrTest, DISABLED_wipeLeases6) {
///
/// @param address Address to use for the initialization
///
- /// @return Lease6Ptr. This will not point to anything if the initialization
+ /// @return Lease6Ptr. This will not point to anything if the
+ /// initialization
/// failed (e.g. unknown address).
Lease6Ptr initializeLease6(std::string address);
const std::vector<std::string>& expected_addresses);
/// @brief String forms of IPv4 addresses
- std::vector<std::string> straddress4_;
+ std::vector<std::string> straddress4_;
/// @brief IOAddress forms of IPv4 addresses
std::vector<isc::asiolink::IOAddress> ioaddress4_;
/// @brief String forms of IPv6 addresses
- std::vector<std::string> straddress6_;
+ std::vector<std::string> straddress6_;
/// @brief Types of IPv6 Leases
std::vector<Lease::Type> leasetype6_;
/// @brief Verifies open failures do NOT invoke db lost callback
///
- /// The db lost callback should only be invoked after succesfully
+ /// The db lost callback should only be invoked after successfully
/// opening the DB and then subsequently losing it. Failing to
/// open should be handled directly by the application layer.
void testNoCallbackOnOpenFailure();
};
-}; // namespace test
-}; // namespace dhcp
-}; // namespace isc
+} // namespace test
+} // namespace dhcp
+} // namespace isc
#endif
/// Closes the database and re-open it. Anything committed should be
/// visible.
///
- /// Parameter is ignored for MySQL backend as the v4 and v6 leases share
+ /// Parameter is ignored for MySQL backend as the v4 and v6 hosts share
/// the same database.
void reopen(Universe) {
HostMgr::create();
///
/// This test checks if the MySqlHostDataSource can be instantiated. This happens
/// only if the database can be opened. Note that this is not part of the
-/// MySqlLeaseMgr test fixure set. This test checks that the database can be
+/// MySqlHostMgr test fixture set. This test checks that the database can be
/// opened: the fixtures assume that and check basic operations.
TEST(MySqlHostDataSource, OpenDatabase) {
<< "*** before the MySQL tests will run correctly.\n";
}
- // Check that attempting to get an instance of the host manager when
+ // Check that attempting to get an instance of the host data source when
// none is set throws an exception.
EXPECT_FALSE(HostMgr::instance().getHostDataSource());
ASSERT_EQ(0, status) << mysql_error(conn.mysql_);
// Create a host with a reservation.
- HostPtr host = HostDataSourceUtils::initializeHost6("2001:db8:1::1", Host::IDENT_HWADDR, false);
+ HostPtr host = HostDataSourceUtils::initializeHost6("2001:db8:1::1",
+ Host::IDENT_HWADDR, false);
// Let's assign some DHCPv4 subnet to the host, because we will use the
// DHCPv4 subnet to try to retrieve the host after failed insertion.
host->setIPv4SubnetID(SubnetID(4));
testNullDuid();
}
-/// @brief Tests whether memfile can store and retrieve hardware addresses
+/// @brief Tests whether MySQL can store and retrieve hardware addresses
TEST_F(MySqlLeaseMgrTest, testLease6Mac) {
testLease6MAC();
}
-/// @brief Tests whether memfile can store and retrieve hardware addresses
+/// @brief Tests whether MySQL can store and retrieve hardware addresses
TEST_F(MySqlLeaseMgrTest, testLease6HWTypeAndSource) {
testLease6HWTypeAndSource();
}
testRecountLeaseStats6();
}
-// @brief Tests that leases from specific subnet can be removed.
+/// @brief Tests that leases from specific subnet can be removed.
TEST_F(MySqlLeaseMgrTest, DISABLED_wipeLeases4) {
testWipeLeases4();
}
-// @brief Tests that leases from specific subnet can be removed.
+/// @brief Tests that leases from specific subnet can be removed.
TEST_F(MySqlLeaseMgrTest, DISABLED_wipeLeases6) {
testWipeLeases6();
}
/// Closes the database and re-open it. Anything committed should be
/// visible.
///
- /// Parameter is ignored for PostgreSQL backend as the v4 and v6 leases share
+ /// Parameter is ignored for PostgreSQL backend as the v4 and v6 hosts share
/// the same database.
void reopen(Universe) {
HostMgr::create();
///
/// This test checks if the PgSqlHostDataSource can be instantiated. This happens
/// only if the database can be opened. Note that this is not part of the
-/// PgSqlLeaseMgr test fixure set. This test checks that the database can be
+/// PgSqlHostMgr test fixture set. This test checks that the database can be
/// opened: the fixtures assume that and check basic operations.
TEST(PgSqlHostDataSource, OpenDatabase) {
try {
string connection_string = validPgSQLConnectionString() + string(" ") +
string(VALID_TIMEOUT);
+ HostMgr::create();
EXPECT_NO_THROW(HostMgr::addBackend(connection_string));
HostMgr::delBackend("postgresql");
} catch (const isc::Exception& ex) {
<< "*** before the PostgreSQL tests will run correctly.\n";
}
- // Check that attempting to get an instance of the host manager when
+ // Check that attempting to get an instance of the host data source when
// none is set throws an exception.
EXPECT_FALSE(HostMgr::instance().getHostDataSource());
}
/// @brief Make sure open failures do NOT invoke db lost callback
-/// The db lost callback should only be invoked after succesfully
+/// The db lost callback should only be invoked after successfully
/// opening the DB and then subsequently losing it. Failing to
/// open should be handled directly by the application layer.
/// There is simply no good way to break the connection in a
/// Adds 3 lease and verifies fetch by DUID.
/// Verifies retrival of non existant DUID fails
TEST_F(PgSqlLeaseMgrTest, getLeases6Duid) {
- testGetLeases6Duid();
+ testGetLeases6Duid();
}
/// @brief Check GetLease6 methods - access by DUID/IAID/SubnetID
testNullDuid();
}
-/// @brief Tests whether Postgres can store and retrieve hardware addresses
+/// @brief Tests whether PostgreSQL can store and retrieve hardware addresses
TEST_F(PgSqlLeaseMgrTest, testLease6Mac) {
testLease6MAC();
}
-/// @brief Tests whether Postgres can store and retrieve hardware addresses
+/// @brief Tests whether PostgreSQL can store and retrieve hardware addresses
TEST_F(PgSqlLeaseMgrTest, testLease6HWTypeAndSource) {
testLease6HWTypeAndSource();
}
testRecountLeaseStats6();
}
-// @brief Tests that leases from specific subnet can be removed.
+/// @brief Tests that leases from specific subnet can be removed.
TEST_F(PgSqlLeaseMgrTest, DISABLED_wipeLeases4) {
testWipeLeases4();
}
-// @brief Tests that leases from specific subnet can be removed.
+/// @brief Tests that leases from specific subnet can be removed.
TEST_F(PgSqlLeaseMgrTest, DISABLED_wipeLeases6) {
testWipeLeases6();
}
/// This function is used to attempt lost connectivity
/// with backends, notably MySQL and Postgresql.
///
-/// The theory being, that in a confined test environment the last
+/// The theory being that in a confined test environment the last
/// such descriptor is the SQL client socket descriptor. This allows
/// us to the close that descriptor and simulate a loss of server
/// connectivity.
return false;
}
+bool
+GenericHostDataSourceTest::compareHostsIdentifier(const ConstHostPtr& host1,
+ const ConstHostPtr& host2) {
+ auto host1_i = host1->getIdentifier();
+ auto host2_i = host2->getIdentifier();
+ auto count1 = host1_i.size();
+ auto count2 = host2_i.size();
+ if (count1 > count2) {
+ count1 = count2;
+ }
+ for (size_t i = 0; i < count1; ++i) {
+ if (host1_i[i] != host2_i[i]) {
+ return (host1_i[i] < host2_i[i]);
+ }
+ }
+ return false;
+}
+
DuidPtr
GenericHostDataSourceTest::HWAddrToDuid(const HWAddrPtr& hwaddr) {
if (!hwaddr) {
EXPECT_FALSE(host_by_id);
}
-void GenericHostDataSourceTest::testMaxSubnetId6() {
+void
+GenericHostDataSourceTest::testMaxSubnetId6() {
std::vector<uint8_t> ident;
ident = HostDataSourceUtils::generateIdentifier();
uint64_t host_id(0);
HostPageSize page_size(4);
ConstHostCollection page;
+ ConstHostCollection all_pages;
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(4, page.size());
host_id = page[3]->getHostId();
ASSERT_NE(0, host_id);
- // Verify we got what we expected.
- for (size_t i = 0; i < 4; ++i) {
- HostDataSourceUtils::compareHosts(hosts[i], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Get second and last pages.
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(1, page.size());
host_id = page[0]->getHostId();
- // Verify we got what we expected.
- HostDataSourceUtils::compareHosts(hosts[4], page[0]);
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Verify we have everything.
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(0, page.size());
+
+ // Hosts are returned in host_id order; host_id is an auto-increment for
+ // MySQL and PostgreSQL but a hash for Cassandra, so sort the collected
+ // hosts by host identifier before comparing them.
+ std::sort(all_pages.begin(), all_pages.end(), compareHostsIdentifier);
+
+ // Verify we got what we expected.
+ for (size_t i = 0; i < 5; ++i) {
+ HostDataSourceUtils::compareHosts(hosts[i], all_pages[i]);
+ }
}
void
uint64_t host_id(0);
HostPageSize page_size(3);
ConstHostCollection page;
+ ConstHostCollection all_pages;
ASSERT_NO_THROW(page = hdsptr_->getPage4(subnet4, idx, host_id, page_size));
ASSERT_EQ(3, page.size());
host_id = page[2]->getHostId();
ASSERT_NE(0, host_id);
- // Verify retrieved hosts.
- for (size_t i = 0; i < 3; ++i) {
- HostDataSourceUtils::compareHosts(hosts[i * 2], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Get second and last pages.
ASSERT_NO_THROW(page = hdsptr_->getPage4(subnet4, idx, host_id, page_size));
ASSERT_EQ(2, page.size());
host_id = page[1]->getHostId();
- // Verify retrieved hosts.
- for (size_t i = 0; i < 2; ++i) {
- HostDataSourceUtils::compareHosts(hosts[(i + 3) * 2], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Verify we have everything.
ASSERT_NO_THROW(page = hdsptr_->getPage4(subnet4, idx, host_id, page_size));
ASSERT_EQ(0, page.size());
+ // Hosts are returned in host_id order; host_id is an auto-increment for
+ // MySQL and PostgreSQL but a hash for Cassandra, so sort the collected
+ // hosts by host identifier before comparing them.
+ std::sort(all_pages.begin(), all_pages.end(), compareHostsIdentifier);
+
+ // Verify we got what we expected.
+ for (size_t i = 0; i < 5; ++i) {
+ HostDataSourceUtils::compareHosts(hosts[i * 2], all_pages[i]);
+ }
+
+ all_pages.clear();
+
// Second subnet.
++subnet4;
host_id = page[2]->getHostId();
ASSERT_NE(0, host_id);
- // Verify retrieved hosts.
- for (size_t i = 0; i < 3; ++i) {
- HostDataSourceUtils::compareHosts(hosts[1 + (i * 2)], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Get second and last pages.
ASSERT_NO_THROW(page = hdsptr_->getPage4(subnet4, idx, host_id, page_size));
ASSERT_EQ(2, page.size());
host_id = page[1]->getHostId();
- // Verify retrieved hosts.
- for (size_t i = 0; i < 2; ++i) {
- HostDataSourceUtils::compareHosts(hosts[1 + ((i + 3) * 2)], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Verify we have everything.
ASSERT_NO_THROW(page = hdsptr_->getPage4(subnet4, idx, host_id, page_size));
ASSERT_EQ(0, page.size());
+
+ // Hosts are returned in host_id order; host_id is an auto-increment for
+ // MySQL and PostgreSQL but a hash for Cassandra, so sort the collected
+ // hosts by host identifier before comparing them.
+ std::sort(all_pages.begin(), all_pages.end(), compareHostsIdentifier);
+
+ // Verify we got what we expected.
+ for (size_t i = 0; i < 5; ++i) {
+ HostDataSourceUtils::compareHosts(hosts[i * 2 + 1], all_pages[i]);
+ }
}
void
uint64_t host_id(0);
HostPageSize page_size(3);
ConstHostCollection page;
+ ConstHostCollection all_pages;
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(3, page.size());
host_id = page[2]->getHostId();
ASSERT_NE(0, host_id);
- // Verify retrieved hosts.
- for (size_t i = 0; i < 3; ++i) {
- HostDataSourceUtils::compareHosts(hosts[i * 2], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Get second and last pages.
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(2, page.size());
host_id = page[1]->getHostId();
- // Verify retrieved hosts.
- for (size_t i = 0; i < 2; ++i) {
- HostDataSourceUtils::compareHosts(hosts[(i + 3) * 2], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Verify we have everything.
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(0, page.size());
+ // Hosts are returned in host_id order; host_id is an auto-increment for
+ // MySQL and PostgreSQL but a hash for Cassandra, so sort the collected
+ // hosts by host identifier before comparing them.
+ std::sort(all_pages.begin(), all_pages.end(), compareHostsIdentifier);
+
+ // Verify we got what we expected.
+ for (size_t i = 0; i < 5; ++i) {
+ HostDataSourceUtils::compareHosts(hosts[i * 2], all_pages[i]);
+ }
+
+ all_pages.clear();
+
// Second subnet.
++subnet6;
host_id = page[2]->getHostId();
ASSERT_NE(0, host_id);
- // Verify retrieved hosts.
- for (size_t i = 0; i < 3; ++i) {
- HostDataSourceUtils::compareHosts(hosts[1 + (i * 2)], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Get second and last pages.
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(2, page.size());
host_id = page[1]->getHostId();
- // Verify retrieved hosts.
- for (size_t i = 0; i < 2; ++i) {
- HostDataSourceUtils::compareHosts(hosts[1 + ((i + 3) * 2)], page[i]);
- }
+ std::copy(page.begin(), page.end(), std::back_inserter(all_pages));
// Verify we have everything.
ASSERT_NO_THROW(page = hdsptr_->getPage6(subnet6, idx, host_id, page_size));
ASSERT_EQ(0, page.size());
+
+ // Hosts are returned in host_id order; host_id is an auto-increment for
+ // MySQL and PostgreSQL but a hash for Cassandra, so sort the collected
+ // hosts by host identifier before comparing them.
+ std::sort(all_pages.begin(), all_pages.end(), compareHostsIdentifier);
+
+ // Verify we got what we expected.
+ for (size_t i = 0; i < 5; ++i) {
+ HostDataSourceUtils::compareHosts(hosts[i * 2 + 1], all_pages[i]);
+ }
}
void
int i = 0;
if (hdsptr_->getType() == "cql") {
// There is no ORDER BY in Cassandra. Order here. Remove this if entries
- // are implemented as ordered in the Cassandra host data source.
+ // are eventually implemented as ordered in the Cassandra host data
+ // source.
std::sort(all_by_id.begin(), all_by_id.end(), compareHostsForSort6);
}
for (ConstHostCollection::const_iterator it = all_by_id.begin();
host2->getIPv6Reservations());
}
-void GenericHostDataSourceTest::testOptionsReservations4(const bool formatted,
- ConstElementPtr user_context) {
+void
+GenericHostDataSourceTest::testOptionsReservations4(const bool formatted,
+ ConstElementPtr user_context) {
HostPtr host = HostDataSourceUtils::initializeHost4("192.0.2.5", Host::IDENT_HWADDR);
// Add a bunch of DHCPv4 and DHCPv6 options for the host.
ASSERT_NO_THROW(addTestOptions(host, formatted, DHCP4_ONLY, user_context));
ASSERT_NO_FATAL_FAILURE(HostDataSourceUtils::compareHosts(host, host_by_addr));
}
-void GenericHostDataSourceTest::testOptionsReservations6(const bool formatted,
- ConstElementPtr user_context) {
+void
+GenericHostDataSourceTest::testOptionsReservations6(const bool formatted,
+ ConstElementPtr user_context) {
HostPtr host = HostDataSourceUtils::initializeHost6("2001:db8::1", Host::IDENT_DUID, false);
// Add a bunch of DHCPv4 and DHCPv6 options for the host.
ASSERT_NO_THROW(addTestOptions(host, formatted, DHCP6_ONLY, user_context));
<< std::endl;
}
-void GenericHostDataSourceTest::testDeleteByAddr4() {
+void
+GenericHostDataSourceTest::testDeleteByAddr4() {
// Make sure we have a pointer to the host data source.
ASSERT_TRUE(hdsptr_);
EXPECT_FALSE(result);
}
-void GenericHostDataSourceTest::testDeleteById4() {
+void
+GenericHostDataSourceTest::testDeleteById4() {
// Make sure we have a pointer to the host data source.
ASSERT_TRUE(hdsptr_);
// Test checks when a IPv4 host with options is deleted that the options are
// deleted as well.
-void GenericHostDataSourceTest::testDeleteById4Options() {
+void
+GenericHostDataSourceTest::testDeleteById4Options() {
// Make sure we have a pointer to the host data source.
ASSERT_TRUE(hdsptr_);
EXPECT_FALSE(result);
}
-void GenericHostDataSourceTest::testDeleteById6() {
+void
+GenericHostDataSourceTest::testDeleteById6() {
// Make sure we have a pointer to the host data source.
ASSERT_TRUE(hdsptr_);
EXPECT_FALSE(result);
}
-void GenericHostDataSourceTest::testDeleteById6Options() {
+void
+GenericHostDataSourceTest::testDeleteById6Options() {
// Make sure we have a pointer to the host data source.
ASSERT_TRUE(hdsptr_);
/// @brief Used to sort a host collection by IPv4 subnet id.
/// @param host1 first host to be compared
/// @param host2 second host to be compared
+ /// @return true if host1's subnet id is smaller than host2's
+ /// subnet id
static bool compareHostsForSort4(const ConstHostPtr& host1,
const ConstHostPtr& host2);
/// @brief Used to sort a host collection by IPv6 subnet id.
/// @param host1 first host to be compared
/// @param host2 second host to be compared
+ /// @return true if host1's subnet id is smaller than host2's
+ /// subnet id
static bool compareHostsForSort6(const ConstHostPtr& host1,
const ConstHostPtr& host2);
+ /// @brief Used to sort a host collection by host identifier.
+ /// @param host1 first host to be compared
+ /// @param host2 second host to be compared
+ /// @return true if host1's identifier is smaller than host2's
+ /// identifier
+ static bool compareHostsIdentifier(const ConstHostPtr& host1,
+ const ConstHostPtr& host2);
+
/// @brief Returns number of entries in the v4 options table.
///
/// This utility method is expected to be implemented by specific backends.
HostPtr host(new Host(&ident[0], ident.size(), identifier, subnet4, subnet6,
IOAddress("0.0.0.0")));
-
+
host->setKey(AuthKey(auth_key));
-
+
if (!prefix) {
// Create IPv6 reservation (for an address)
IPv6Resrv resv(IPv6Resrv::TYPE_NA, IOAddress(address), 128);
/upgrade_1.0_to_2.0.sh
/upgrade_2.0_to_3.0.sh
+/upgrade_3.0_to_4.0.sh
/wipe_data.sh
sqlscripts_DATA += dhcpdb_drop.cql
sqlscripts_DATA += upgrade_1.0_to_2.0.sh
sqlscripts_DATA += upgrade_2.0_to_3.0.sh
+sqlscripts_DATA += upgrade_3.0_to_4.0.sh
sqlscripts_DATA += wipe_data.sh
EXTRA_DIST = ${sqlscripts_DATA}
-- Table `lease4`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS lease4 (
- address int,
- hwaddr blob,
- client_id blob,
- valid_lifetime bigint,
- expire bigint,
- subnet_id int,
- fqdn_fwd boolean,
- fqdn_rev boolean,
- hostname varchar,
- state int,
+ address INT,
+ hwaddr BLOB,
+ client_id BLOB,
+ valid_lifetime BIGINT,
+ expire BIGINT,
+ subnet_id INT,
+ fqdn_fwd BOOLEAN,
+ fqdn_rev BOOLEAN,
+ hostname VARCHAR,
+ state INT,
PRIMARY KEY ((address))
);
-- Table `lease6`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS lease6 (
- address varchar,
- valid_lifetime bigint,
- expire bigint,
- subnet_id int,
- pref_lifetime bigint,
- duid blob,
- iaid int,
- lease_type int,
- prefix_len int,
- fqdn_fwd boolean,
- fqdn_rev boolean,
- hostname varchar,
- hwaddr blob,
- hwtype int,
- hwaddr_source int,
- state int,
+ address VARCHAR,
+ valid_lifetime BIGINT,
+ expire BIGINT,
+ subnet_id INT,
+ pref_lifetime BIGINT,
+ duid BLOB,
+ iaid INT,
+ lease_type INT,
+ prefix_len INT,
+ fqdn_fwd BOOLEAN,
+ fqdn_rev BOOLEAN,
+ hostname VARCHAR,
+ hwaddr BLOB,
+ hwtype INT,
+ hwaddr_source INT,
+ state INT,
PRIMARY KEY ((address))
);
-- Table `lease6_types`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS lease6_types (
- lease_type int, -- Lease type code.
- name varchar, -- Name of the lease type
+ lease_type INT, -- Lease type code.
+ name VARCHAR, -- Name of the lease type
PRIMARY KEY ((lease_type))
);
INSERT INTO lease6_types (lease_type, name) VALUES (0, 'IA_NA'); -- Non-temporary v6 addresses
-- Table `lease_hwaddr_source`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS lease_hwaddr_source (
- hwaddr_source int,
- name varchar,
+ hwaddr_source INT,
+ name VARCHAR,
PRIMARY KEY ((hwaddr_source))
);
-- Table `lease_state`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS lease_state (
- state int,
- name varchar,
+ state INT,
+ name VARCHAR,
PRIMARY KEY ((state))
);
-- Table `schema_version`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS schema_version (
- version int,
- minor int,
+ version INT,
+ minor INT,
PRIMARY KEY ((version))
);
-- Table `host_reservations`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS host_reservations (
- id bigint,
- host_identifier blob,
- host_identifier_type int,
- host_ipv4_subnet_id int,
- host_ipv6_subnet_id int,
- host_ipv4_address int,
- host_ipv4_next_server int,
- host_ipv4_server_hostname text,
- host_ipv4_boot_file_name text,
- hostname text,
- user_context text,
- host_ipv4_client_classes text,
- host_ipv6_client_classes text,
+ id BIGINT,
+ host_identifier BLOB,
+ host_identifier_type INT,
+ host_ipv4_subnet_id INT,
+ host_ipv6_subnet_id INT,
+ host_ipv4_address INT,
+ host_ipv4_next_server INT,
+ host_ipv4_server_hostname VARCHAR,
+ host_ipv4_boot_file_name VARCHAR,
+ hostname VARCHAR,
+ user_context VARCHAR,
+ host_ipv4_client_classes VARCHAR,
+ host_ipv6_client_classes VARCHAR,
-- reservation
- reserved_ipv6_prefix_address text,
- reserved_ipv6_prefix_length int,
- reserved_ipv6_prefix_address_type int,
- iaid int,
+ reserved_ipv6_prefix_address VARCHAR,
+ reserved_ipv6_prefix_length INT,
+ reserved_ipv6_prefix_address_type INT,
+ iaid INT,
-- option
- option_universe int,
- option_code int,
- option_value blob,
- option_formatted_value text,
- option_space text,
- option_is_persistent boolean,
- option_client_class text,
- option_subnet_id int,
- option_user_context text,
- option_scope_id int,
+ option_universe INT,
+ option_code INT,
+ option_value BLOB,
+ option_formatted_value VARCHAR,
+ option_space VARCHAR,
+ option_is_persistent BOOLEAN,
+ option_client_class VARCHAR,
+ option_subnet_id INT,
+ option_user_context VARCHAR,
+ option_scope_id INT,
PRIMARY KEY ((id))
);
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS host_identifier_type (
- type int,
- name varchar,
+ type INT,
+ name VARCHAR,
PRIMARY KEY ((type))
);
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS dhcp_option_scope (
- scope_id int,
- scope_name varchar,
- PRIMARY KEY ((scope_id))
+ scope_id INT,
+ scope_name VARCHAR,
+ PRIMARY KEY ((scope_id))
);
INSERT INTO dhcp_option_scope (scope_id, scope_name) VALUES (0, 'global');
-- This line starts database upgrade to version 3.0
-- Add a column holding leases for user context.
-ALTER TABLE lease4 ADD user_context text;
-ALTER TABLE lease6 ADD user_context text;
+ALTER TABLE lease4 ADD user_context VARCHAR;
+ALTER TABLE lease6 ADD user_context VARCHAR;
-- -----------------------------------------------------
-- Table `logs` (logs table is used by forensic logging hook library)
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS logs (
- timeuuid timeuuid, -- creation timeuuid, use dateOf() to get timestamp
- address varchar, -- address or prefix
- log text, -- the log itself
+ timeuuid TIMEUUID, -- creation timeuuid, use dateOf() to get timestamp
+ address VARCHAR, -- address or prefix
+ log VARCHAR, -- the log itself
PRIMARY KEY ((timeuuid))
);
CREATE INDEX IF NOT EXISTS logsindex ON logs (address);
-- This line adds auth_key column into host reservation table
-ALTER TABLE host_reservations ADD auth_key text;
+ALTER TABLE host_reservations ADD auth_key VARCHAR;
-- Cql requires primary keys in the WHERE here.
DELETE FROM schema_version WHERE version=2;
INSERT INTO schema_version (version, minor) VALUES(3, 0);
-- This line concludes database upgrade to version 3.0
+
+-- This line starts database upgrade to version 4.0
+
+-- -----------------------------------------------------
+-- Table `hosts`
+-- -----------------------------------------------------
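+-- 'key' is a hash of the host identifying columns (identifier, identifier
+-- type, subnet ids and IPv4 address) and serves as the partition key; 'id'
+-- is the per-row entry id used as a clustering column.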
+CREATE TABLE IF NOT EXISTS hosts (
+ key BIGINT,
+ id BIGINT,
+ host_identifier BLOB,
+ host_identifier_type INT,
+ host_ipv4_subnet_id INT,
+ host_ipv6_subnet_id INT,
+ host_ipv4_address INT,
+ host_ipv4_next_server INT,
+ host_ipv4_server_hostname VARCHAR,
+ host_ipv4_boot_file_name VARCHAR,
+ hostname VARCHAR,
+ auth_key VARCHAR,
+ user_context VARCHAR,
+ host_ipv4_client_classes VARCHAR,
+ host_ipv6_client_classes VARCHAR,
+ -- reservation
+ reserved_ipv6_prefix_address VARCHAR,
+ reserved_ipv6_prefix_length INT,
+ reserved_ipv6_prefix_address_type INT,
+ iaid INT,
+ -- option
+ option_universe INT,
+ option_code INT,
+ option_value BLOB,
+ option_formatted_value VARCHAR,
+ option_space VARCHAR,
+ option_is_persistent BOOLEAN,
+ option_client_class VARCHAR,
+ option_subnet_id INT,
+ option_user_context VARCHAR,
+ option_scope_id INT,
+ PRIMARY KEY ((key), id)
+);
+
+CREATE INDEX IF NOT EXISTS hostsindex1 ON hosts (host_identifier);
+CREATE INDEX IF NOT EXISTS hostsindex2 ON hosts (host_identifier_type);
+CREATE INDEX IF NOT EXISTS hostsindex3 ON hosts (host_ipv4_subnet_id);
+CREATE INDEX IF NOT EXISTS hostsindex4 ON hosts (host_ipv6_subnet_id);
+CREATE INDEX IF NOT EXISTS hostsindex5 ON hosts (host_ipv4_address);
+CREATE INDEX IF NOT EXISTS hostsindex6 ON hosts (reserved_ipv6_prefix_address);
+CREATE INDEX IF NOT EXISTS hostsindex7 ON hosts (reserved_ipv6_prefix_length);
+
+DROP TABLE IF EXISTS host_reservations;
+
+DROP INDEX IF EXISTS host_reservationsindex1;
+DROP INDEX IF EXISTS host_reservationsindex2;
+DROP INDEX IF EXISTS host_reservationsindex3;
+DROP INDEX IF EXISTS host_reservationsindex4;
+DROP INDEX IF EXISTS host_reservationsindex5;
+DROP INDEX IF EXISTS host_reservationsindex6;
+DROP INDEX IF EXISTS host_reservationsindex7;
+
+-- Cql requires primary keys in the WHERE here.
+DELETE FROM schema_version WHERE version=3;
+INSERT INTO schema_version (version, minor) VALUES(4, 0);
+
+-- This line concludes database upgrade to version 4.0
DROP TABLE IF EXISTS lease_hwaddr_source;
DROP TABLE IF EXISTS lease_state;
DROP TABLE IF EXISTS schema_version;
-DROP TABLE IF EXISTS host_reservations;
+DROP TABLE IF EXISTS hosts;
DROP TABLE IF EXISTS dhcp4_options;
DROP TABLE IF EXISTS dhcp6_options;
DROP TABLE IF EXISTS host_identifier_type;
DROP INDEX IF EXISTS lease6index5;
DROP INDEX IF EXISTS lease6index6;
-DROP INDEX IF EXISTS host_reservationsindex1;
-DROP INDEX IF EXISTS host_reservationsindex2;
-DROP INDEX IF EXISTS host_reservationsindex3;
-DROP INDEX IF EXISTS host_reservationsindex4;
-DROP INDEX IF EXISTS host_reservationsindex5;
-DROP INDEX IF EXISTS host_reservationsindex6;
-DROP INDEX IF EXISTS host_reservationsindex7;
+DROP INDEX IF EXISTS hostsindex1;
+DROP INDEX IF EXISTS hostsindex2;
+DROP INDEX IF EXISTS hostsindex3;
+DROP INDEX IF EXISTS hostsindex4;
+DROP INDEX IF EXISTS hostsindex5;
+DROP INDEX IF EXISTS hostsindex6;
+DROP INDEX IF EXISTS hostsindex7;
DROP INDEX IF EXISTS logsindex;
-- Table \`host_reservations\`
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS host_reservations (
- id bigint,
- host_identifier blob,
- host_identifier_type int,
- host_ipv4_subnet_id int,
- host_ipv6_subnet_id int,
- host_ipv4_address int,
- host_ipv4_next_server int,
- host_ipv4_server_hostname text,
- host_ipv4_boot_file_name text,
- hostname text,
- user_context text,
- host_ipv4_client_classes text,
- host_ipv6_client_classes text,
+ id BIGINT,
+ host_identifier BLOB,
+ host_identifier_type INT,
+ host_ipv4_subnet_id INT,
+ host_ipv6_subnet_id INT,
+ host_ipv4_address INT,
+ host_ipv4_next_server INT,
+ host_ipv4_server_hostname VARCHAR,
+ host_ipv4_boot_file_name VARCHAR,
+ hostname VARCHAR,
+ user_context VARCHAR,
+ host_ipv4_client_classes VARCHAR,
+ host_ipv6_client_classes VARCHAR,
-- reservation
- reserved_ipv6_prefix_address text,
- reserved_ipv6_prefix_length int,
- reserved_ipv6_prefix_address_type int,
- iaid int,
+ reserved_ipv6_prefix_address VARCHAR,
+ reserved_ipv6_prefix_length INT,
+ reserved_ipv6_prefix_address_type INT,
+ iaid INT,
-- option
- option_universe int,
- option_code int,
- option_value blob,
- option_formatted_value text,
- option_space text,
- option_is_persistent boolean,
- option_client_class text,
- option_subnet_id int,
- option_user_context text,
- option_scope_id int,
+ option_universe INT,
+ option_code INT,
+ option_value BLOB,
+ option_formatted_value VARCHAR,
+ option_space VARCHAR,
+ option_is_persistent BOOLEAN,
+ option_client_class VARCHAR,
+ option_subnet_id INT,
+ option_user_context VARCHAR,
+ option_scope_id INT,
PRIMARY KEY ((id))
);
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS host_identifier_type (
- type int,
- name varchar,
+ type INT,
+ name VARCHAR,
PRIMARY KEY ((type))
);
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS dhcp_option_scope (
- scope_id int,
- scope_name varchar,
- PRIMARY KEY ((scope_id))
+ scope_id INT,
+ scope_name VARCHAR,
+ PRIMARY KEY ((scope_id))
);
INSERT INTO dhcp_option_scope (scope_id, scope_name) VALUES (0, 'global');
-- This line starts database upgrade to version 3.0
-- Add a column holding leases for user context.
-ALTER TABLE lease4 ADD user_context text;
-ALTER TABLE lease6 ADD user_context text;
+ALTER TABLE lease4 ADD user_context VARCHAR;
+ALTER TABLE lease6 ADD user_context VARCHAR;
-- -----------------------------------------------------
-- Table logs (it is used by forensic logging hook library)
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS logs (
- timeuuid timeuuid, -- creation timeuuid, use dateOf() to get timestamp
- address varchar, -- address or prefix
- log text, -- the log itself
+ timeuuid TIMEUUID, -- creation timeuuid, use dateOf() to get timestamp
+ address VARCHAR, -- address or prefix
+ log VARCHAR, -- the log itself
PRIMARY KEY ((timeuuid))
);
CREATE INDEX IF NOT EXISTS logsindex ON logs (address);
-- Add auth_key for storing keys for DHCPV6 reconfigure.
-ALTER TABLE host_reservations ADD auth_key text;
+ALTER TABLE host_reservations ADD auth_key VARCHAR;
-- Cql requires primary keys in the WHERE here.
DELETE FROM schema_version WHERE version=2;
# col - column name of the column in question
#
check_column() {
- local val="$1";shift
- local col="$1"
- local old_id="0"
- local new_id="-1"
- local comma=""
-
- # If the current value equals the value to be replaced
- # add it to the accumulator
- if [ "$val" = "$old_id" ]
- then
+ local val="$1";shift
+ local col="$1"
+ local old_id="0"
+ local new_id="-1"
+ local comma=""
+
+ # If the current value equals the value to be replaced
+ # add it to the accumulator
+ if [ "$val" = "$old_id" ]
+ then
# If the accumulator isn't empty, we need a comma
if [ ! -z "$update_cols" ]
then
fi
update_cols="$update_cols$comma $col = $new_id"
- fi
+ fi
}
# This function converts subnet ID columns in of existing host_reservations
line_cnt=0;
update_cnt=0;
- while read line
+ while read -r line
do
line_cnt=$((line_cnt + 1));
update_cols=""
--- /dev/null
+#!/bin/sh
+
+prefix=@prefix@
+# Include utilities. Use installed version if available and
+# use build version if it isn't.
+if [ -e "@datarootdir@/@PACKAGE_NAME@/scripts/admin-utils.sh" ]; then
+ . @datarootdir@/@PACKAGE_NAME@/scripts/admin-utils.sh
+else
+ . @abs_top_builddir@/src/bin/admin/admin-utils.sh
+fi
+
+# Need a path for temporary files created during upgrade data migration
+# Use the state directory in the install path directory if it exists, otherwise
+# use the build tree
+if [ -e "@localstatedir@/@PACKAGE_NAME@" ]; then
+ temp_file_dir="@localstatedir@/@PACKAGE_NAME@"
+else
+ temp_file_dir="@abs_top_builddir@/src/share/database/scripts/cql"
+fi
+
+cqlargs=$@
+
+# Ensures the current schema version is 3.0. If not it exits.
+check_version() {
+ version=$(cql_version $cqlargs)
+
+ if [ "${version}" != "3.0" ]; then
+ printf "This script upgrades 3.0 to 4.0. Reported version is %s. Skipping upgrade.\n" "${version}"
+ exit 0
+ fi
+}
+
+# Performs the schema changes from 3.0 to 4.0
+update_schema() {
+ cqlsh $cqlargs <<EOF
+-- This line starts database upgrade to version 4.0
+
+-- -----------------------------------------------------
+-- Table \`hosts\`
+-- -----------------------------------------------------
+CREATE TABLE IF NOT EXISTS hosts (
+ key BIGINT,
+ id BIGINT,
+ host_identifier BLOB,
+ host_identifier_type INT,
+ host_ipv4_subnet_id INT,
+ host_ipv6_subnet_id INT,
+ host_ipv4_address INT,
+ host_ipv4_next_server INT,
+ host_ipv4_server_hostname VARCHAR,
+ host_ipv4_boot_file_name VARCHAR,
+ hostname VARCHAR,
+ auth_key VARCHAR,
+ user_context VARCHAR,
+ host_ipv4_client_classes VARCHAR,
+ host_ipv6_client_classes VARCHAR,
+ -- reservation
+ reserved_ipv6_prefix_address VARCHAR,
+ reserved_ipv6_prefix_length INT,
+ reserved_ipv6_prefix_address_type INT,
+ iaid INT,
+ -- option
+ option_universe INT,
+ option_code INT,
+ option_value BLOB,
+ option_formatted_value VARCHAR,
+ option_space VARCHAR,
+ option_is_persistent BOOLEAN,
+ option_client_class VARCHAR,
+ option_subnet_id INT,
+ option_user_context VARCHAR,
+ option_scope_id INT,
+ PRIMARY KEY ((key), id)
+);
+
+CREATE INDEX IF NOT EXISTS hostsindex1 ON hosts (host_identifier);
+CREATE INDEX IF NOT EXISTS hostsindex2 ON hosts (host_identifier_type);
+CREATE INDEX IF NOT EXISTS hostsindex3 ON hosts (host_ipv4_subnet_id);
+CREATE INDEX IF NOT EXISTS hostsindex4 ON hosts (host_ipv6_subnet_id);
+CREATE INDEX IF NOT EXISTS hostsindex5 ON hosts (host_ipv4_address);
+CREATE INDEX IF NOT EXISTS hostsindex6 ON hosts (reserved_ipv6_prefix_address);
+CREATE INDEX IF NOT EXISTS hostsindex7 ON hosts (reserved_ipv6_prefix_length);
+EOF
+
+ if [ "$?" -ne 0 ]
+ then
+ echo "Schema update FAILED!"
+ exit 1
+ fi
+}
+
+# Performs the clean-up schema changes from 3.0 to 4.0
+clean_up_schema() {
+ cqlsh $cqlargs <<EOF
+DROP TABLE IF EXISTS host_reservations;
+
+DROP INDEX IF EXISTS host_reservationsindex1;
+DROP INDEX IF EXISTS host_reservationsindex2;
+DROP INDEX IF EXISTS host_reservationsindex3;
+DROP INDEX IF EXISTS host_reservationsindex4;
+DROP INDEX IF EXISTS host_reservationsindex5;
+DROP INDEX IF EXISTS host_reservationsindex6;
+DROP INDEX IF EXISTS host_reservationsindex7;
+
+-- Cql requires primary keys in the WHERE here.
+DELETE FROM schema_version WHERE version=3;
+INSERT INTO schema_version (version, minor) VALUES(4, 0);
+
+-- This line concludes database upgrade to version 4.0
+EOF
+
+ if [ "$?" -ne 0 ]
+ then
+ echo "Schema update FAILED!"
+ exit 1
+ fi
+}
+
+# Function to delete temporary migration files
+clean_up() {
+ # clean up the files
+ if [ -e "$export_file" ]
+ then
+ rm $export_file
+ fi
+
+ if [ -e "$update_file" ]
+ then
+ rm $update_file
+ fi
+}
+
+# Function to clean up and exit the script gracefully
+#
+# Called by migrate_host_data()
+#
+# Parameters:
+# status - integer value to pass to sh:exit
+# explanation - "quoted" text message to emit to stdout
+exit_now() {
+ status=$1;shift
+ explanation=$1
+
+ clean_up
+ if [ "$status" -eq 0 ]
+ then
+ clean_up_schema
+ echo "Data Migration SUCCESS! $explanation"
+ else
+ echo "Data Migration FAILURE! $explanation"
+ fi
+
+ exit $status
+}
+
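+# Left-pads $1 with the fill character $3 up to a width of $2 characters
+# (or truncates it to the first $2 characters) and stores the result in
+# $value.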
+fill() {
+ string=$1;shift
+ count=$1;shift
+ fill_char=$1;shift
+ length=`echo $string | wc -c`
+ length=$(($length - 1))
+ if [ $length -gt $count ]; then
+ value=`echo "$string" | cut -c 1-$count`
+ return
+ fi
+ result=""
+ count=$(($count - $length))
+ i=1
+ while [ $i -le $count ]; do
+ result="$fill_char$result"
+ i=$((i+1))
+ done
+ value="$result$string"
+}
+
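+# Converts a '0x...' hexadecimal blob value (as exported by cqlsh) into a
+# colon-separated byte string (e.g. 0xabcd -> ab:cd) and stores it in
+# $identifier.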
+identifier_text() {
+ string=$1;shift
+ length=`echo $string | wc -c`
+ length=$(($length - 1))
+ # skip 0x from 0xabcdef
+ string=`echo "$string" | cut -c 3-$length`
+ identifier=""
+ # add starting 0: 0xabc->0x0abc
+ mod=$(($length % 2))
+ if [ $mod -ne 0 ]; then
+ string="0"$string
+ fi
+ length=`echo $string | wc -c`
+ length=$(($length - 1))
+ i=1
+ while [ $i -le $length ]; do
+ char=`echo "$string" | cut -c $i-$i`
+ mod=$(($i % 2))
+ if [ $mod -ne 0 -a $i -ne 1 ]; then
+ char=":"$char
+ fi
+ identifier=$identifier$char
+ i=$((i+1))
+ done
+}
+
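+# Computes a 64-bit FNV-1a style hash of the given string and stores it in
+# $hash. The prime and offset basis below are the standard 64-bit FNV
+# constants; the intent (assumed here) is to mirror the key hash computed
+# by the CQL host backend for the 'key' partition column.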
+key_hash() {
+ string=$1;shift
+ length=`echo $string | wc -c`
+ length=$(($length - 1))
+ FNV_prime=1099511628211
+ FNV_offset_basis=-3750763034362895579 # signed value for 14695981039346656037
+ MAX_UINT64=18446744073709551615
+ hash=$FNV_offset_basis
+ i=1
+ while [ $i -le $length ]; do
+ char=`echo "$string" | cut -c $i-$i`
+ data=`echo "$char" | tr -d "\n" | od -An -t uC | tr -d ' '`
+ hash=$(($hash ^ $data))
+ hash=$(($hash * $FNV_prime))
+ hash=$(($hash % $MAX_UINT64))
+ i=$((i+1))
+ done
+}
+
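+# Builds the fixed-width lookup string used for the partition key: the host
+# identifier and identifier type (only when there is no IPv4 address
+# reservation), the subnet ids and the IPv4 address, each left-padded with
+# '-', then hashes the result into $key via key_hash.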
+generate_key() {
+ host_id=$1;shift
+ host_identifier=$1;shift
+ host_identifier_type=$1;shift
+ host_ipv4_subnet_id=$1;shift
+ host_ipv6_subnet_id=$1;shift
+ host_ipv4_address=$1;shift
+ key=""
+ identifier_text "$host_identifier"
+ local_host_identifier=$identifier
+ if [ -n "$host_ipv4_address" ] && [ "$host_ipv4_address" -eq 0 ]; then
+ fill "$local_host_identifier" 383 "-"
+ key="$key$value"
+ fill "$host_identifier_type" 10 "-"
+ key="$key$value"
+ else
+ fill "" 383 "-"
+ key="$key$value"
+ fill "" 10 "-"
+ key="$key$value"
+ fi
+ fill "$host_ipv4_subnet_id" 10 "-"
+ key="$key$value"
+ fill "$host_ipv6_subnet_id" 10 "-"
+ key="$key$value"
+ fill "$host_ipv4_address" 15 "-"
+ key="$key$value"
+ key_hash "$key"
+ key="$hash"
+}
+
+# This function adds the host 'key' column, which is the partition key
+# of the 'hosts' table.
+#
+# After exhausting the export file, the update file is submitted to
+# cqlsh for execution.
+#
+# No parameters.
+migrate_host_data() {
+ export_file="$temp_file_dir/cql_export.csv"
+ update_file="$temp_file_dir/cql_update.cql"
+
+ clean_up
+
+ # Fetch host_reservation data so we have host_identifier,
+ # host_identifier_type, host_ipv4_subnet_id, host_ipv6_subnet_id and
+ # host_ipv4_address to generate host key
+ echo "Exporting host_reservation data to $export_file ..."
+ query="COPY host_reservations \
+ (id, host_identifier, host_identifier_type, host_ipv4_subnet_id, \
+ host_ipv6_subnet_id, host_ipv4_address, host_ipv4_next_server, \
+ host_ipv4_server_hostname, host_ipv4_boot_file_name, hostname, \
+ auth_key, user_context, host_ipv4_client_classes, \
+ host_ipv6_client_classes, reserved_ipv6_prefix_address, \
+ reserved_ipv6_prefix_length, reserved_ipv6_prefix_address_type, \
+ iaid, option_universe, option_code, option_value, \
+ option_formatted_value, option_space, option_is_persistent, \
+ option_client_class, option_subnet_id, option_user_context, \
+ option_scope_id) \
+ TO '$export_file'"
+
+ cqlsh $cqlargs -e "$query"
+ if [ "$?" -ne 0 ]
+ then
+ exit_now 1 "Cassandra export failed! Could not migrate data!"
+ fi
+
+ # Strip the carriage returns that CQL insists on adding.
+ if [ -e "$export_file" ]
+ then
+ cat $export_file | tr -d '\015' > $export_file.2
+ mv $export_file.2 $export_file
+ else
+ # Shouldn't happen but then again we're talking about CQL here
+ exit_now 1 "Cassandra export file $export_file is missing?"
+ fi
+
+ # Iterate through the exported data, generating an updated CSV row for
+ # each host with the computed partition key inserted after the id column.
+ # We should have one host per line.
+ line_cnt=0;
+ update_cnt=0;
+
+ while read -r line
+ do
+ line_cnt=$((line_cnt + 1));
+ xIFS="$IFS"
+ IFS=','
+ i=1
+ # Parse the column values
+ for val in $line
+ do
+ case $i in
+ 1)
+ host_id=$val
+ ;;
+ 2)
+ host_identifier=$val
+ ;;
+ 3)
+ host_identifier_type=$val
+ ;;
+ 4)
+ host_ipv4_subnet_id=$val
+ ;;
+ 5)
+ host_ipv6_subnet_id=$val
+ ;;
+ 6)
+ host_ipv4_address=$val
+ ;;
+ *)
+ ;;
+ esac
+ i=$((i + 1))
+ done
+
+ generate_key "$host_id" "$host_identifier" "$host_identifier_type" "$host_ipv4_subnet_id" "$host_ipv6_subnet_id" "$host_ipv4_address"
+ key_data="$key"
+ update_cnt=$((update_cnt + 1))
+
+ IFS="$xIFS"
+ echo "$line" | sed -e "s/$host_id/$host_id,$key_data/" >> $update_file
+ done < $export_file
+
+ # If we didn't record any updates, then hey, we're good to go!
+ if [ "$update_cnt" -eq 0 ]
+ then
+ exit_now 0 "Completed successfully: No updates were needed"
+ fi
+
+ # We have at least one update in the update file, so submit it to cqlsh.
+ echo "$update_cnt update statements written to $update_file"
+ echo "Running the updates..."
+ query="COPY hosts \
+ (id, key, host_identifier, host_identifier_type, host_ipv4_subnet_id, \
+ host_ipv6_subnet_id, host_ipv4_address, host_ipv4_next_server, \
+ host_ipv4_server_hostname, host_ipv4_boot_file_name, hostname, \
+ auth_key, user_context, host_ipv4_client_classes, \
+ host_ipv6_client_classes, reserved_ipv6_prefix_address, \
+ reserved_ipv6_prefix_length, reserved_ipv6_prefix_address_type, \
+ iaid, option_universe, option_code, option_value, \
+ option_formatted_value, option_space, option_is_persistent, \
+ option_client_class, option_subnet_id, option_user_context, \
+ option_scope_id) \
+ FROM '$update_file'"
+
+ cqlsh $cqlargs -e "$query"
+ if [ "$?" -ne 0 ]
+ then
+ exit_now 1 "Cassandra updates failed"
+ fi
+
+ exit_now 0 "Updated $update_cnt of $line_cnt records"
+}
+
+check_version
+update_schema
+migrate_host_data
TRUNCATE TABLE lease6_types;
TRUNCATE TABLE lease_hwaddr_source;
TRUNCATE TABLE lease_state;
-TRUNCATE TABLE host_reservations;
+TRUNCATE TABLE hosts;
TRUNCATE TABLE logs;
EOF
then
exit 0
fi
+
if [ "$1" = "--cflags-only-I" ]
then
echo "-I${CPP_DRIVER_PATH}/include/"
fi
if [ "$1" = "--libs" ]
then
- echo "-L${CPP_DRIVER_PATH}/build/ -l${cql_lib} -luv"
+ echo "-L${CPP_DRIVER_PATH}/build/ -l${cql_lib} -lssl -luv"
exit 0
fi
+
if [ "$1" = "--modversion" ]
then
MAJOR=$(grep VERSION_MAJOR "${CPP_DRIVER_PATH}/include/cassandra.h" | cut -d " " -f 3)
echo "${MAJOR}.${MINOR}.${PATCH}"
exit 0
fi
+
if [ "$1" = "--print-errors" ]
then
exit 0
fi
+
echo "wrong parameter"
echo "run: \`$0 --help\` for more help"
+
exit 1