git.ipfire.org Git - location/libloc.git/commitdiff
importer: Drop EDROP as it has been merged into DROP master
author     Michael Tremer <michael.tremer@ipfire.org>
           Thu, 11 Apr 2024 17:45:18 +0000 (17:45 +0000)
committer  Michael Tremer <michael.tremer@ipfire.org>
           Thu, 11 Apr 2024 17:45:52 +0000 (17:45 +0000)
https://www.spamhaus.org/resource-hub/network-security/spamhaus-drop-and-edrop-to-become-a-single-list/

Signed-off-by: Michael Tremer <michael.tremer@ipfire.org>
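
Context for the change above: Spamhaus has folded EDROP into the main DROP list, so the importer only needs to fetch DROP and DROPv6. The patch to src/scripts/location-importer.in is not reproduced in this excerpt; the sketch below only illustrates the shape of such a change. The feed names, URLs and the fetch callable are assumptions, not the importer's actual code.

```python
# Hypothetical sketch only -- not the actual patch to location-importer.in.
# EDROP is removed from the feed table because Spamhaus now publishes its
# entries as part of DROP itself.
SPAMHAUS_DROP_FEEDS = [
    ("SPAMHAUS-DROP",   "https://www.spamhaus.org/drop/drop.txt"),
    ("SPAMHAUS-DROPV6", "https://www.spamhaus.org/drop/dropv6.txt"),
    # ("SPAMHAUS-EDROP", "https://www.spamhaus.org/drop/edrop.txt"),  # merged into DROP
]

def iter_drop_networks(fetch):
    """Yield (feed name, network) pairs from the remaining Spamhaus DROP feeds.

    `fetch` is any callable that takes a URL and returns the feed body as text.
    """
    for name, url in SPAMHAUS_DROP_FEEDS:
        for line in fetch(url).splitlines():
            # DROP lines look like "192.0.2.0/24 ; SBL123456" -- keep the CIDR part
            network = line.split(";", 1)[0].strip()
            if network:
                yield name, network
```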
127 files changed:
.gitignore
Makefile.am
README.md [new file with mode: 0644]
bash-completion/location [new file with mode: 0644]
configure.ac
data/database.db [new file with mode: 0644]
data/signing-key.pem [moved from src/signing-key.pem with 100% similarity]
debian/build.sh [changed mode: 0644->0755]
debian/changelog
debian/compat [deleted file]
debian/control
debian/copyright
debian/genchangelog.sh [new file with mode: 0755]
debian/libloc-dev.install
debian/libloc1.install
debian/libloc1.symbols
debian/location-importer.install [deleted file]
debian/location-perl.install [deleted file]
debian/location-python.install [deleted file]
debian/location.install
debian/location.manpages [deleted file]
debian/location.postinst [new file with mode: 0644]
debian/location.postrm [new file with mode: 0644]
debian/python3-location.examples [moved from debian/location-python.examples with 100% similarity]
debian/python3-location.install [new file with mode: 0644]
debian/rules
debian/watch
m4/attributes.m4 [new file with mode: 0644]
m4/ax_prog_lua_modules.m4 [new file with mode: 0644]
man/libloc.txt [new file with mode: 0644]
man/loc_database_count_as.txt [new file with mode: 0644]
man/loc_database_get_as.txt [new file with mode: 0644]
man/loc_database_get_country.txt [new file with mode: 0644]
man/loc_database_lookup.txt [new file with mode: 0644]
man/loc_database_new.txt [new file with mode: 0644]
man/loc_get_log_priority.txt [new file with mode: 0644]
man/loc_new.txt [new file with mode: 0644]
man/loc_set_log_fn.txt [new file with mode: 0644]
man/loc_set_log_priority.txt [new file with mode: 0644]
man/location.txt
po/LINGUAS
po/POTFILES.in
po/de.po
po/ka.po [new file with mode: 0644]
src/.gitignore
src/address.c [new file with mode: 0644]
src/as-list.c
src/as.c
src/country-list.c
src/country.c
src/cron/location-update.in [new file with mode: 0644]
src/database.c
src/libloc.c
src/libloc.sym
src/libloc/address.h [new file with mode: 0644]
src/libloc/as-list.h [moved from src/loc/as-list.h with 93% similarity]
src/libloc/as.h [moved from src/loc/as.h with 94% similarity]
src/libloc/compat.h [moved from src/loc/compat.h with 92% similarity]
src/libloc/country-list.h [moved from src/loc/country-list.h with 93% similarity]
src/libloc/country.h [moved from src/loc/country.h with 88% similarity]
src/libloc/database.h [moved from src/loc/database.h with 93% similarity]
src/libloc/format.h [moved from src/loc/format.h with 96% similarity]
src/libloc/libloc.h [moved from src/loc/libloc.h with 91% similarity]
src/libloc/network-list.h [moved from src/loc/network-list.h with 79% similarity]
src/libloc/network-tree.h [new file with mode: 0644]
src/libloc/network.h [moved from src/loc/network.h with 60% similarity]
src/libloc/private.h [moved from src/loc/private.h with 81% similarity]
src/libloc/resolv.h [moved from src/loc/resolv.h with 96% similarity]
src/libloc/stringpool.h [moved from src/loc/stringpool.h with 95% similarity]
src/libloc/writer.h [moved from src/loc/writer.h with 92% similarity]
src/lua/as.c [new file with mode: 0644]
src/lua/as.h [new file with mode: 0644]
src/lua/compat.h [new file with mode: 0644]
src/lua/country.c [new file with mode: 0644]
src/lua/country.h [new file with mode: 0644]
src/lua/database.c [new file with mode: 0644]
src/lua/database.h [new file with mode: 0644]
src/lua/location.c [new file with mode: 0644]
src/lua/location.h [new file with mode: 0644]
src/lua/network.c [new file with mode: 0644]
src/lua/network.h [new file with mode: 0644]
src/network-list.c
src/network-tree.c [new file with mode: 0644]
src/network.c
src/perl/Location.xs
src/python/as.c
src/python/as.h
src/python/country.c
src/python/country.h
src/python/database.c
src/python/database.h
src/python/database.py [deleted file]
src/python/importer.py [deleted file]
src/python/location-importer.in [deleted file]
src/python/location/__init__.py [moved from src/python/__init__.py.in with 87% similarity]
src/python/location/database.py [new file with mode: 0644]
src/python/location/downloader.py [moved from src/python/downloader.py with 81% similarity]
src/python/location/export.py [moved from src/python/export.py with 54% similarity]
src/python/location/i18n.py [moved from src/python/i18n.py with 98% similarity]
src/python/location/logger.py [moved from src/python/logger.py with 99% similarity]
src/python/locationmodule.c
src/python/locationmodule.h
src/python/network.c
src/python/network.h
src/python/writer.c
src/python/writer.h
src/resolv.c
src/scripts/location-importer.in [new file with mode: 0644]
src/scripts/location.in [moved from src/python/location.in with 93% similarity]
src/stringpool.c
src/systemd/location-update.service.in
src/test-address.c [new file with mode: 0644]
src/test-as.c
src/test-country.c
src/test-database.c
src/test-libloc.c
src/test-network-list.c
src/test-network.c
src/test-signature.c
src/test-stringpool.c
src/writer.c
tests/lua/main.lua [new file with mode: 0755]
tests/python/country.py [new file with mode: 0755]
tests/python/networks-dedup.py [new file with mode: 0755]
tests/python/test-database.py [new file with mode: 0755]
tests/python/test-export.py [new file with mode: 0755]
tools/copy.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
index d368dbd7cc3205900226403f663bdae8cbb8fb17..4823554894dd27129dbcb55887ccae983812d890 100644 (file)
@@ -1,9 +1,9 @@
-*.db
-*.db.xz
+*~
 *.log
 *.mo
 *.o
 *.tar.xz
+*.trs
 .deps/
 .libs/
 Makefile
@@ -13,11 +13,13 @@ Makefile.in
 /build-aux
 /config.*
 /configure
+/*.db
+/*.db.xz
 /libtool
 /stamp-h1
-/src/python/location
-/src/python/location-importer
-/src/python/__init__.py
+/src/cron/location-update
+/src/scripts/location
+/src/scripts/location-importer
 /src/systemd/location-update.service
 /src/systemd/location-update.timer
 /test.db
diff --git a/Makefile.am b/Makefile.am
index dbdfd8efcb803395fae2514a80d04968e60db346..b045e49e3d83aaaffed9695bb3d8229b04ee7672 100644 (file)
@@ -3,6 +3,7 @@ CLEANFILES =
 INSTALL_DIRS =
 ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS}
 AM_MAKEFLAGS = --no-print-directory
+check_SCRIPTS =
 
 SUBDIRS = . po
 BINDINGS =
@@ -13,6 +14,8 @@ if ENABLE_PERL
 BINDINGS += perl
 endif
 
+bashcompletiondir = @bashcompletiondir@
+
 AM_CPPFLAGS = \
        -include $(top_builddir)/config.h \
        -DSYSCONFDIR=\""$(sysconfdir)"\" \
@@ -22,7 +25,7 @@ AM_CFLAGS = ${my_CFLAGS} \
        -ffunction-sections \
        -fdata-sections
 
-AM_LDFLAGS =
+AM_LDFLAGS = ${my_LDFLAGS}
 
 # leaving a space here to work around automake's conditionals
  ifeq ($(OS),Darwin)
@@ -34,7 +37,7 @@ AM_LDFLAGS =
  endif
 
 LIBLOC_CURRENT=1
-LIBLOC_REVISION=0
+LIBLOC_REVISION=3
 LIBLOC_AGE=0
 
 DISTCHECK_CONFIGURE_FLAGS = \
@@ -51,6 +54,7 @@ SED_PROCESS = \
        -e 's,@databasedir\@,$(databasedir),g' \
        < $< > $@ || rm $@
 
+cron_dailydir = $(sysconfdir)/cron.daily
 databasedir = $(localstatedir)/lib/location
 pkgconfigdir = $(libdir)/pkgconfig
 
@@ -84,32 +88,36 @@ po/POTFILES.in: Makefile
                sed -e "s@$(abs_srcdir)/@@g" | LC_ALL=C sort > $@
 
 EXTRA_DIST += \
+       README.md \
        examples/private-key.pem \
        examples/public-key.pem \
        examples/python/create-database.py \
        examples/python/read-database.py
 
 pkginclude_HEADERS = \
-       src/loc/libloc.h \
-       src/loc/as.h \
-       src/loc/as-list.h \
-       src/loc/compat.h \
-       src/loc/country.h \
-       src/loc/country-list.h \
-       src/loc/database.h \
-       src/loc/format.h \
-       src/loc/network.h \
-       src/loc/network-list.h \
-       src/loc/private.h \
-       src/loc/stringpool.h \
-       src/loc/resolv.h \
-       src/loc/writer.h
+       src/libloc/libloc.h \
+       src/libloc/address.h \
+       src/libloc/as.h \
+       src/libloc/as-list.h \
+       src/libloc/compat.h \
+       src/libloc/country.h \
+       src/libloc/country-list.h \
+       src/libloc/database.h \
+       src/libloc/format.h \
+       src/libloc/network.h \
+       src/libloc/network-list.h \
+       src/libloc/network-tree.h \
+       src/libloc/private.h \
+       src/libloc/stringpool.h \
+       src/libloc/resolv.h \
+       src/libloc/writer.h
 
 lib_LTLIBRARIES = \
        src/libloc.la
 
 src_libloc_la_SOURCES = \
        src/libloc.c \
+       src/address.c \
        src/as.c \
        src/as-list.c \
        src/country.c \
@@ -117,6 +125,7 @@ src_libloc_la_SOURCES = \
        src/database.c \
        src/network.c \
        src/network-list.c \
+       src/network-tree.c \
        src/resolv.c \
        src/stringpool.c \
        src/writer.c
@@ -145,6 +154,24 @@ src_libloc_la_LIBADD = \
 src_libloc_la_DEPENDENCIES = \
        ${top_srcdir}/src/libloc.sym
 
+noinst_LTLIBRARIES = \
+       src/libloc-internal.la
+
+src_libloc_internal_la_SOURCES = \
+       $(src_libloc_la_SOURCES)
+
+src_libloc_internal_la_CFLAGS = \
+       $(src_libloc_la_CFLAGS)
+
+src_libloc_internal_la_LDFLAGS = \
+       $(filter-out -version-info %,$(src_libloc_la_LDFLAGS))
+
+src_libloc_internal_la_LIBADD = \
+       $(src_libloc_la_LIBADD)
+
+src_libloc_internal_la_DEPENDENCIES = \
+       $(src_libloc_la_DEPENDENCIES)
+
 pkgconfig_DATA = \
        src/libloc.pc
 
@@ -154,22 +181,21 @@ EXTRA_DIST += \
 CLEANFILES += \
        src/libloc.pc
 
-dist_pkgpython_PYTHON = \
-       src/python/database.py \
-       src/python/downloader.py \
-       src/python/export.py \
-       src/python/i18n.py \
-       src/python/importer.py \
-       src/python/logger.py
-
-pkgpython_PYTHON = \
-       src/python/__init__.py
+if BUILD_BASH_COMPLETION
+bashcompletion_DATA = \
+       bash-completion/location
+endif
 
 EXTRA_DIST += \
-       src/python/__init__.py.in
+       bash-completion/location
 
-CLEANFILES += \
-       src/python/__init__.py
+dist_pkgpython_PYTHON = \
+       src/python/location/__init__.py \
+       src/python/location/database.py \
+       src/python/location/downloader.py \
+       src/python/location/export.py \
+       src/python/location/i18n.py \
+       src/python/location/logger.py
 
 pyexec_LTLIBRARIES = \
        src/python/_location.la
@@ -202,6 +228,63 @@ src_python__location_la_LIBADD = \
        src/libloc.la \
        $(PYTHON_LIBS)
 
+# ------------------------------------------------------------------------------
+
+if ENABLE_LUA
+lua_LTLIBRARIES = \
+       src/lua/location.la
+
+luadir = $(LUA_INSTALL_CMOD)
+
+src_lua_location_la_SOURCES = \
+       src/lua/as.c \
+       src/lua/as.h \
+       src/lua/compat.h \
+       src/lua/country.c \
+       src/lua/country.h \
+       src/lua/database.c \
+       src/lua/database.h \
+       src/lua/location.c \
+       src/lua/location.h \
+       src/lua/network.c \
+       src/lua/network.h
+
+src_lua_location_la_CFLAGS = \
+       $(AM_CFLAGS) \
+       $(LUA_CFLAGS)
+
+src_lua_location_la_LDFLAGS = \
+       $(AM_LDFLAGS) \
+       $(LUA_LDFLAGS) \
+       -shared \
+       -module \
+       -avoid-version
+
+src_lua_location_la_LIBADD = \
+       src/libloc.la \
+       $(LUA_LIBS)
+endif
+
+EXTRA_DIST += \
+       src/lua/as.c \
+       src/lua/as.h \
+       src/lua/country.c \
+       src/lua/country.h \
+       src/lua/database.c \
+       src/lua/database.h \
+       src/lua/location.c \
+       src/lua/location.h \
+       src/lua/network.c \
+       src/lua/network.h
+
+LUA_TESTS = \
+       tests/lua/main.lua
+
+EXTRA_DIST += \
+       $(LUA_TESTS)
+
+# ------------------------------------------------------------------------------
+
 # Compile & install bindings
 all-local: $(foreach binding,$(BINDINGS),build-$(binding))
 check-local: $(foreach binding,$(BINDINGS),check-$(binding))
@@ -219,8 +302,7 @@ EXTRA_DIST += \
        src/perl/t/Location.t \
        src/perl/typemap
 
-.PHONY: build-perl
-build-perl:
+build-perl: src/libloc.la
        @mkdir -p $(builddir)/src/perl/{lib,t}
        @test -e $(builddir)/src/perl/Location.xs || ln -s --relative $(srcdir)/src/perl/Location.xs $(builddir)/src/perl/
        @test -e $(builddir)/src/perl/MANIFEST || ln -s --relative $(srcdir)/src/perl/MANIFEST $(builddir)/src/perl/
@@ -229,45 +311,49 @@ build-perl:
        @test -e $(builddir)/src/perl/t/Location.t || ln -s --relative $(srcdir)/src/perl/t/Location.t $(builddir)/src/perl/t/
        @test -e $(builddir)/src/perl/typemap || ln -s --relative $(srcdir)/src/perl/typemap $(builddir)/src/perl/
 
-       cd $(builddir)/src/perl && $(PERL) Makefile.PL PREFIX="$(prefix)" \
+       cd $(builddir)/src/perl && $(PERL) Makefile.PL NO_PACKLIST=1 NO_PERLLOCAL=1 \
+               INSTALLDIRS=vendor \
                INC="-I$(abs_srcdir)/src" LIBS="-L$(abs_builddir)/src/.libs -lloc"
-       cd $(builddir)/src/perl && $(MAKE) LD_RUN_PATH=
+       cd $(builddir)/src/perl && $(MAKE)
+       touch build-perl
 
 .PHONY: check-perl
-check-perl: testdata.db
+check-perl: testdata.db build-perl
        cd $(builddir)/src/perl && $(MAKE) LD_LIBRARY_PATH="$(abs_builddir)/src/.libs" test \
                database="../../$<" keyfile="$(abs_srcdir)/examples/public-key.pem"
 
 .PHONY: install-perl
-install-perl:
-       cd $(builddir)/src/perl && $(MAKE) install DESTIDR=$(DESTDIR)
+install-perl: build-perl
+       cd $(builddir)/src/perl && $(MAKE) install DESTDIR=$(DESTDIR)
 
 .PHONY: clean-perl
 clean-perl:
        cd $(builddir)/src/perl && $(MAKE) distclean
+       rm -f build-perl
 
 .PHONY: uninstall-perl
 uninstall-perl:
-       rm -rvf \
-               $(DESTDIR)/$(prefix)/lib/*/perl/*/Location.pm \
-               $(DESTDIR)/$(prefix)/lib/*/perl/*/auto/Location \
-               $(DESTDIR)/$(prefix)/lib/*/perl/*/perllocal.pod \
-               $(DESTDIR)/$(prefix)/man/man3/Location.3pm
+       rm -vf \
+               $(DESTDIR)/@PERL_MODPATH@/Location.pm \
+               $(DESTDIR)/@PERL_MODPATH@/auto/Location/Location.so \
+               $(DESTDIR)/@PERL_MANPATH@/Location.3pm
+       -rmdir $(DESTDIR)/@PERL_MODPATH@/auto/Location
 
 bin_SCRIPTS = \
-       src/python/location \
-       src/python/location-importer
+       src/scripts/location \
+       src/scripts/location-importer
 
 EXTRA_DIST += \
-       src/python/location.in \
-       src/python/location-importer.in
+       src/scripts/location.in \
+       src/scripts/location-importer.in
 
 CLEANFILES += \
-       src/python/location \
-       src/python/location-importer
+       src/scripts/location \
+       src/scripts/location-importer
 
 # ------------------------------------------------------------------------------
 
+# Use systemd timers if available
 if HAVE_SYSTEMD
 systemdsystemunit_DATA = \
        src/systemd/location-update.service \
@@ -278,16 +364,33 @@ CLEANFILES += \
 
 INSTALL_DIRS += \
        $(systemdsystemunitdir)
+
+# Otherwise fall back to cron
+else
+cron_daily_SCRIPTS = \
+       src/cron/location-update
+
+CLEANFILES += \
+       $(cron_daily_DATA)
 endif
 
 EXTRA_DIST += \
+       src/cron/location-update.in \
        src/systemd/location-update.service.in \
        src/systemd/location-update.timer.in
 
 # ------------------------------------------------------------------------------
 
 dist_database_DATA = \
-       src/signing-key.pem
+       data/database.db \
+       data/signing-key.pem
+
+install-data-hook:
+       chmod 444 $(DESTDIR)$(databasedir)/database.db
+
+.PHONY: update-database
+update-database:
+       curl https://location.ipfire.org/databases/1/location.db.xz | xz -d > data/database.db
 
 # ------------------------------------------------------------------------------
 
@@ -296,23 +399,42 @@ TESTS_CFLAGS = \
        -DLIBLOC_PRIVATE \
        -DABS_SRCDIR=\"$(abs_srcdir)\"
 
+TESTS_LDADD = \
+       src/libloc.la \
+       src/libloc-internal.la
+
+TESTS_ENVIRONMENT = \
+       LD_LIBRARY_PATH="$(abs_builddir)/src/.libs" \
+       LUA_CPATH="$(abs_builddir)/src/lua/.libs/?.so;;" \
+       PYTHONPATH=$(abs_srcdir)/src/python:$(abs_builddir)/src/python/.libs \
+       TEST_DATA_DIR="$(abs_top_srcdir)/data" \
+       TEST_DATABASE="$(abs_top_srcdir)/data/database.db" \
+       TEST_SIGNING_KEY="$(abs_top_srcdir)/data/signing-key.pem"
+
 TESTS = \
-       src/test-libloc \
-       src/test-stringpool \
-       src/test-database \
-       src/test-as \
-       src/test-network \
-       src/test-country \
-       src/test-signature
+       $(check_PROGRAMS) \
+       $(check_SCRIPTS) \
+       $(dist_check_SCRIPTS)
 
 CLEANFILES += \
        testdata.db
 
 testdata.db: examples/python/create-database.py
-       PYTHONPATH=$(abs_builddir)/src/python/.libs \
+       PYTHONPATH=$(abs_srcdir)/src/python:$(abs_builddir)/src/python/.libs \
        ABS_SRCDIR="$(abs_srcdir)" \
                $(PYTHON) $< $@
 
+dist_check_SCRIPTS = \
+       tests/python/country.py \
+       tests/python/networks-dedup.py \
+       tests/python/test-database.py \
+       tests/python/test-export.py
+
+if ENABLE_LUA
+check_SCRIPTS += \
+       $(LUA_TESTS)
+endif
+
 check_PROGRAMS = \
        src/test-libloc \
        src/test-stringpool \
@@ -321,7 +443,8 @@ check_PROGRAMS = \
        src/test-network \
        src/test-network-list \
        src/test-country \
-       src/test-signature
+       src/test-signature \
+       src/test-address
 
 src_test_libloc_SOURCES = \
        src/test-libloc.c
@@ -330,7 +453,7 @@ src_test_libloc_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_libloc_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_as_SOURCES = \
        src/test-as.c
@@ -339,7 +462,7 @@ src_test_as_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_as_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_country_SOURCES = \
        src/test-country.c
@@ -348,7 +471,7 @@ src_test_country_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_country_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_network_SOURCES = \
        src/test-network.c
@@ -357,7 +480,7 @@ src_test_network_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_network_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_network_list_SOURCES = \
        src/test-network-list.c
@@ -366,7 +489,7 @@ src_test_network_list_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_network_list_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_stringpool_SOURCES = \
        src/test-stringpool.c
@@ -375,7 +498,7 @@ src_test_stringpool_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_stringpool_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_database_SOURCES = \
        src/test-database.c
@@ -384,7 +507,7 @@ src_test_database_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_database_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
 
 src_test_signature_SOURCES = \
        src/test-signature.c
@@ -393,16 +516,43 @@ src_test_signature_CFLAGS = \
        $(TESTS_CFLAGS)
 
 src_test_signature_LDADD = \
-       src/libloc.la
+       $(TESTS_LDADD)
+
+src_test_address_SOURCES = \
+       src/test-address.c
+
+src_test_address_CFLAGS = \
+       $(TESTS_CFLAGS)
+
+src_test_address_LDADD = \
+       $(TESTS_LDADD)
 
 # ------------------------------------------------------------------------------
 
 MANPAGES = \
-       man/location.8
-
-MANPAGES_TXT  = $(patsubst %.8,%.txt,$(MANPAGES))
-MANPAGES_HTML = $(patsubst %.txt,%.html,$(MANPAGES_TXT))
-MANPAGES_XML  = $(patsubst %.txt,%.xml,$(MANPAGES_TXT))
+       $(MANPAGES_3) \
+       $(MANPAGES_1)
+
+MANPAGES_3 = \
+       man/libloc.3 \
+       man/loc_database_count_as.3 \
+       man/loc_database_get_as.3 \
+       man/loc_database_get_country.3 \
+       man/loc_database_lookup.3 \
+       man/loc_database_new.3 \
+       man/loc_get_log_priority.3 \
+       man/loc_new.3 \
+       man/loc_set_log_fn.3 \
+       man/loc_set_log_priority.3
+
+MANPAGES_1 = \
+       man/location.1
+
+MANPAGES_TXT   = $(MANPAGES_TXT_3) $(MANPAGES_TXT_1)
+MANPAGES_TXT_3 = $(patsubst %.3,%.txt,$(MANPAGES_3))
+MANPAGES_TXT_1 = $(patsubst %.1,%.txt,$(MANPAGES_1))
+MANPAGES_HTML  = $(patsubst %.txt,%.html,$(MANPAGES_TXT))
+MANPAGES_XML   = $(patsubst %.txt,%.xml,$(MANPAGES_TXT))
 
 .PHONY: man
 man: $(MANPAGES) $(MANPAGES_HTML)
@@ -445,7 +595,10 @@ man/%.xml: man/%.txt man/asciidoc.conf
                -f $(abs_srcdir)/man/asciidoc.conf \
                -d manpage -b docbook -o $@ $<
 
-man/%.8: man/%.xml
+man/%.3: man/%.xml
+       $(XSLTPROC_COMMAND_MAN)
+
+man/%.1: man/%.xml
        $(XSLTPROC_COMMAND_MAN)
 
 man/%.html: man/%.txt man/asciidoc.conf
@@ -458,19 +611,27 @@ man/%.html: man/%.txt man/asciidoc.conf
 upload-man: $(MANPAGES_HTML)
        rsync -avHz --delete --progress $(MANPAGES_HTML) ms@fs01.haj.ipfire.org:/pub/man-pages/$(PACKAGE_NAME)/
 
+EXTRA_DIST += \
+       tools/copy.py
+
 EXTRA_DIST += \
        debian/build.sh \
        debian/changelog \
-       debian/compat \
        debian/control \
        debian/copyright \
-       debian/location.install \
-       debian/location.manpages \
-       debian/location-python.install \
+       debian/genchangelog.sh \
+       debian/gensymbols.sh \
        debian/libloc1.install \
+       debian/libloc1.symbols \
        debian/libloc-dev.install \
+       debian/location.install \
+       debian/location.postinst \
+       debian/location.postrm \
+       debian/python3-location.examples \
+       debian/python3-location.install \
        debian/rules \
-       debian/source/format
+       debian/source/format \
+       debian/watch
 
 .PHONY: debian
 debian: dist
diff --git a/README.md b/README.md
new file mode 100644 (file)
index 0000000..36c8144
--- /dev/null
+++ b/README.md
@@ -0,0 +1,47 @@
+# **_`libloc`_** - IP Address Location
+
+[Home](https://www.ipfire.org/location)
+
+`libloc` is a library for fast and efficient IP address location.
+
+It offers:
+
+- **The Fastest Lookups**: O(1) lookup time for IP addresses using a binary tree structure.
+- **Low Memory Footprint**: The database is packed in a very efficient format.
+- **Security**: Integrated signature verification for data integrity.
+- **Maintainability**: Automatic updates.
+- **Standalone**: No external dependencies, easy to integrate.
+
+`libloc` is ideal for:
+
+- Firewalls
+- Intrusion Prevention/Detection Systems (IPS/IDS)
+- Web Applications
+- Network Management Tools
+
+The publicly available daily updated database stores information about:
+
+- The entire IPv6 and IPv4 Internet
+- Autonomous System Information including names
+- Country Codes, Names and Continent Codes
+
+## Command Line
+
+`libloc` comes with a command line tool which makes it easy to test the library or
+integrate it into your shell scripts. location(8) knows a couple of commands to retrieve
+country or Autonomous System of an IP address and can generate lists of networks to be
+imported into other software.
+
+`location (8)` is versatile and very easy to use.
+
+## Language Bindings
+
+`libloc` itself is written in C. There are bindings for the following languages available:
+
+- Python 3
+- Lua
+- Perl
+
+`libloc` comes with native Python bindings which are used by its main command-line tool
+location. They are the most advanced bindings as they support reading from the database
+as well as writing to it.
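
For orientation, the Python bindings described in the new README can be exercised with a few lines. This is a minimal sketch assuming a database at the default path /var/lib/location/database.db; the attribute names follow the shipped examples and should be treated as assumptions if your libloc version differs.

```python
#!/usr/bin/env python3
# Minimal lookup sketch using the Python bindings from src/python/location.
import location

db = location.Database("/var/lib/location/database.db")

network = db.lookup("81.3.27.32")       # returns a network object, or None if unknown
if network:
    print("Network :", network)
    print("Country :", network.country_code)
    print("ASN     :", network.asn)

    if network.asn:
        asn = db.get_as(network.asn)    # resolve the AS number to its name
        if asn:
            print("AS name :", asn.name)
```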
diff --git a/bash-completion/location b/bash-completion/location
new file mode 100644 (file)
index 0000000..66fc6c1
--- /dev/null
+++ b/bash-completion/location
@@ -0,0 +1,151 @@
+# location(1) completion                                   -*- shell-script -*-
+#
+# bash-completion - part of libloc
+#
+# Copyright (C) 2020,2023 Hans-Christoph Steiner <hans@eds.org>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+__location_init() {
+    if type -t _init_completion >/dev/null; then
+        _init_completion -n : || return
+    else
+       # manual initialization for older bash completion versions
+       COMPREPLY=()
+       cur="${COMP_WORDS[COMP_CWORD]}"
+       prev="${COMP_WORDS[COMP_CWORD-1]}"
+    fi
+
+    (( $# >= 1 )) && __complete_${1}
+    __ltrim_colon_completions "$cur"
+}
+
+__complete_options() {
+    case "${prev}" in
+        --directory)
+            _filedir -d
+            return 0;;
+        --cron)
+           COMPREPLY=( $( compgen -W "daily weekly monthly" -- $cur ) )
+            return 0;;
+       --family)
+           COMPREPLY=( $( compgen -W "ipv6 ipv4" -- $cur ) )
+            return 0;;
+        --format)
+           COMPREPLY=( $( compgen -W "ipset list nftables xt_geoip" -- $cur ) )
+            return 0;;
+    esac
+
+    case "$cur" in
+       -*)
+           COMPREPLY=( $( compgen -W "--help ${lopts}" -- $cur ) )
+           return 0;;
+    esac
+}
+
+__complete_dump() {
+    __complete_options
+}
+
+__complete_get_as() {
+    __complete_options
+}
+
+__complete_export() {
+    lopts="--directory --family --format"
+    __complete_options
+}
+
+__complete_list_networks_by_as() {
+    lopts="--family --format"
+    __complete_options
+}
+
+__complete_list_networks_by_cc() {
+    lopts="--family --format"
+    __complete_options
+}
+
+__complete_list_networks_by_flags() {
+    lopts="--anonymous-proxy --satellite-provider --anycast --drop --family --format"
+    __complete_options
+}
+
+__complete_list_bogons() {
+    lopts="--family --format"
+    __complete_options
+}
+
+__complete_list_countries() {
+    lopts="--show-name --show-continent"
+    __complete_options
+}
+
+__complete_lookup() {
+    __complete_options
+}
+
+__complete_search_as() {
+    __complete_options
+}
+
+__complete_update() {
+    lopts="--cron"
+    __complete_options
+}
+
+__complete_version() {
+    __complete_options
+}
+
+__complete_verify() {
+    __complete_options
+}
+
+# for f in `location|grep -Eo '[a-z,-]+,[a-z,-]+'| sed 's/,/ /g'`; do printf '%s \\\n' $f; done|sort -u
+__cmds=" \
+dump \
+export \
+get-as \
+list-bogons \
+list-countries \
+list-networks-by-as \
+list-networks-by-cc \
+list-networks-by-flags \
+lookup \
+search-as \
+update \
+verify \
+version \
+"
+
+for c in $__cmds; do
+    eval "_location_${c} () {
+                local cur prev lopts
+                __location_init ${c//-/_}
+        }"
+done
+
+_location() {
+    local cmd
+    cmd=${COMP_WORDS[1]}
+
+    [[ $__cmds == *\ $cmd\ * ]] && _location_${cmd} || {
+            (($COMP_CWORD == 1)) && COMPREPLY=( $( compgen -W "${__cmds}" -- $cmd ) )
+        }
+}
+
+complete -F _location location
+
+return 0
diff --git a/configure.ac b/configure.ac
index cc9de27a680946da4f01efba4436859b6c41c142..f84cc9228f7f2cfd0edb21aaa0e549b6a22ae2cc 100644 (file)
@@ -1,6 +1,6 @@
 AC_PREREQ(2.60)
 AC_INIT([libloc],
-        [0.9.6],
+        [0.9.17],
         [location@lists.ipfire.org],
         [libloc],
         [https://location.ipfire.org/])
@@ -18,7 +18,7 @@ AM_INIT_AUTOMAKE([
        dist-xz
        subdir-objects
 ])
-AC_PROG_CC_STDC
+AC_PROG_CC
 AC_USE_SYSTEM_EXTENSIONS
 AC_SYS_LARGEFILE
 AC_CONFIG_MACRO_DIR([m4])
@@ -55,6 +55,41 @@ AC_CHECK_PROGS(ASCIIDOC, [asciidoc])
 if test "${have_man_pages}" = "yes" && test -z "${ASCIIDOC}"; then
        AC_MSG_ERROR([Required program 'asciidoc' not found])
 fi
+
+# - pkg-config -----------------------------------------------------------------
+
+m4_ifndef([PKG_PROG_PKG_CONFIG],
+       [m4_fatal([Could not locate the pkg-config autoconf
+               macros. These are usually located in /usr/share/aclocal/pkg.m4.
+               If your macros are in a different location, try setting the
+               environment variable AL_OPTS="-I/other/macro/dir" before running
+               ./autogen.sh or autoreconf again. Make sure pkg-config is installed.])])
+
+PKG_PROG_PKG_CONFIG
+PKG_INSTALLDIR(['${usrlib_execdir}/pkgconfig'])
+
+# - bash-completion ------------------------------------------------------------
+
+#enable_bash_completion=yes
+AC_ARG_WITH([bashcompletiondir],
+       AS_HELP_STRING([--with-bashcompletiondir=DIR], [Bash completions directory]),
+       [],
+       [AS_IF([`$PKG_CONFIG --exists bash-completion`], [
+               with_bashcompletiondir=`$PKG_CONFIG --variable=completionsdir bash-completion`
+       ], [
+               with_bashcompletiondir=${datadir}/bash-completion/completions
+       ])
+])
+
+AC_SUBST([bashcompletiondir], [$with_bashcompletiondir])
+
+AC_ARG_ENABLE([bash-completion],
+       AS_HELP_STRING([--disable-bash-completion], [do not install bash completion files]),
+       [], [enable_bash_completion=yes]
+)
+
+AM_CONDITIONAL([BUILD_BASH_COMPLETION], [test "x$enable_bash_completion" = xyes])
+
 # - debug ----------------------------------------------------------------------
 
 AC_ARG_ENABLE([debug],
@@ -81,6 +116,7 @@ AC_CHECK_FUNCS([ \
     htobe16 \
     htobe32 \
     htobe64 \
+    madvise \
     mmap \
     munmap \
     res_query \
@@ -103,9 +139,35 @@ my_CFLAGS="\
 -Wtype-limits \
 "
 AC_SUBST([my_CFLAGS])
+AC_SUBST([my_LDFLAGS])
+
+# Enable -fanalyzer if requested
+AC_ARG_ENABLE([analyzer],
+       AS_HELP_STRING([--enable-analyzer], [enable static analyzer (-fanalyzer) @<:@default=disabled@:>@]),
+       [], [enable_analyzer=no])
+AS_IF([test "x$enable_analyzer" = "xyes"],
+       CC_CHECK_FLAGS_APPEND([my_CFLAGS], [CFLAGS], [-fanalyzer])
+)
+
+# Enable -fno-semantic-interposition (if available)
+CC_CHECK_FLAGS_APPEND([my_CFLAGS], [CFLAGS], [-fno-semantic-interposition])
+CC_CHECK_FLAGS_APPEND([my_LDFLAGS], [LDFLAGS], [-fno-semantic-interposition])
 
 # ------------------------------------------------------------------------------
 
+AC_ARG_WITH([database-path],
+       AS_HELP_STRING([--with-database-path], [The default database path]),
+       [], [with_database_path=/var/lib/location/database.db]
+)
+
+if test -z "${with_database_path}"; then
+       AC_MSG_ERROR([The default database path is empty])
+fi
+
+AC_DEFINE_UNQUOTED([LIBLOC_DEFAULT_DATABASE_PATH], ["${with_database_path}"],
+       [The default path for the database])
+AC_SUBST([DEFAULT_DATABASE_PATH], [${with_database_path}])
+
 AC_ARG_WITH([systemd],
        AS_HELP_STRING([--with-systemd], [Enable systemd support.])
 )
@@ -140,18 +202,47 @@ AM_CONDITIONAL(HAVE_SYSTEMD, [test "x$have_systemd" = "xyes"])
 
 # ------------------------------------------------------------------------------
 
+AC_PATH_PROG(PKG_CONFIG, pkg-config, no)
+
 # Python
 AM_PATH_PYTHON([3.4])
 PKG_CHECK_MODULES([PYTHON], [python-${PYTHON_VERSION}])
 
+# Lua
+AC_ARG_ENABLE(lua,
+       AS_HELP_STRING([--disable-lua], [do not build the Lua modules]), [], [enable_lua=yes])
+
+AM_CONDITIONAL(ENABLE_LUA, test "$enable_lua" = "yes")
+
+AS_IF(
+       [test "$enable_lua" = "yes"], [
+               PKG_CHECK_MODULES([LUA], [lua])
+
+               AX_PROG_LUA_MODULES([luaunit],, [AC_MSG_ERROR([Lua modules are missing])])
+
+               LUA_INSTALL_LMOD=$($PKG_CONFIG --define-variable=prefix=${prefix} --variable=INSTALL_LMOD lua)
+               AC_SUBST(LUA_INSTALL_LMOD)
+               LUA_INSTALL_CMOD=$($PKG_CONFIG --define-variable=prefix=${prefix} --variable=INSTALL_CMOD lua)
+               AC_SUBST(LUA_INSTALL_CMOD)
+       ],
+)
+
 # Perl
 AC_PATH_PROG(PERL, perl, no)
 AC_SUBST(PERL)
 
-AX_PROG_PERL_MODULES(ExtUtils::MakeMaker,, AC_MSG_WARN(Need some Perl modules))
+AX_PROG_PERL_MODULES(Config ExtUtils::MakeMaker,, AC_MSG_WARN(Need some Perl modules))
 
 AC_ARG_ENABLE(perl, AS_HELP_STRING([--disable-perl], [do not build the perl modules]), [],[enable_perl=yes])
 AM_CONDITIONAL(ENABLE_PERL, test "$enable_perl" = "yes")
+AS_IF([test "$enable_perl" = "yes"],
+      [
+       PERL_MODPATH=$($PERL -MConfig -e 'print $Config{installvendorarch}')
+       PERL_MANPATH=$($PERL -MConfig -e 'print $Config{installvendorman3dir}')
+       AC_SUBST(PERL_MODPATH)
+       AC_SUBST(PERL_MANPATH)
+       ],
+)
 
 dnl Checking for libresolv
 case "${host}" in
@@ -191,9 +282,16 @@ AC_MSG_RESULT([
         cflags:                 ${CFLAGS}
         ldflags:                ${LDFLAGS}
 
+        database path:          ${with_database_path}
         debug:                  ${enable_debug}
         systemd support:        ${have_systemd}
+       bash-completion:        ${enable_bash_completion}
 
        Bindings:
-         perl:                 ${enable_perl}
+         Lua:                  ${enable_lua}
+         Lua shared path:      ${LUA_INSTALL_LMOD}
+         Lua module path:      ${LUA_INSTALL_CMOD}
+         Perl:                 ${enable_perl}
+         Perl module path:     ${PERL_MODPATH}
+         Perl manual path:     ${PERL_MANPATH}
 ])
diff --git a/data/database.db b/data/database.db
new file mode 100644 (file)
index 0000000..86e7e42
Binary files /dev/null and b/data/database.db differ
diff --git a/src/signing-key.pem b/data/signing-key.pem
similarity index 100%
rename from src/signing-key.pem
rename to data/signing-key.pem
diff --git a/debian/build.sh b/debian/build.sh
old mode 100644 (file)
new mode 100755 (executable)
index 4385455..a1f6b6c
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 ARCHITECTURES=( amd64 arm64 i386 armhf )
-RELEASES=( buster bullseye sid )
+RELEASES=( buster bullseye bookworm sid )
 
 CHROOT_PATH="/var/tmp"
 
@@ -38,6 +38,9 @@ main() {
     local release
     for release in ${RELEASES[@]}; do
         local chroot="${release}-${host_arch}-sbuild"
+        if [ "${release}" = "buster" ]; then
+            local buster_backport=( --extra-repository "deb http://deb.debian.org/debian buster-backports main" --build-dep-resolver=aspcud )
+        fi
 
         mkdir -p "${release}"
         pushd "${release}"
@@ -62,7 +65,7 @@ main() {
             cp -r "${tmp}/sources" .
 
             # Run the build process
-            if ! sbuild --dist="${release}" --host="${arch}" --source "sources/${package}"; then
+            if ! sbuild --dist="${release}" --host="${arch}" --source "${buster_backport[@]}" "sources/${package}"; then
                 echo "Could not build package for ${release} on ${arch}" >&2
                 rm -rf "${tmp}"
                 return 1
diff --git a/debian/changelog b/debian/changelog
index e58c0cadffdfbc85f925245af4fd9a89b41439c6..de26894c2992bf03b0898fd6bc25eec14704c300 100644 (file)
+libloc (0.9.17-1) unstable; urgency=medium
+
+  [ Michael Tremer ]
+  * importer: Store geofeed URLs from RIR data
+  * importer: Add command to import geofeeds into the database
+  * importer: Just fetch any exception from the executor
+  * importer: Sync geofeeds
+  * importer: Use geofeeds for country assignment
+  * importer: Use a GIST index for networks from geofeeds
+  * importer: Add a search index match geofeed networks quicker
+  * importer: Fix reading Geofeeds from remarks
+  * importer: Ensure that we only use HTTPS URLs for Geofeeds
+  * importer: Validate country codes from Geofeeds
+  * importer: Fix parsing gzipped content on invalid Content-Type header
+  * po: Update translations
+  * network: Drop an unused function to count all networks
+  * location: Fix correct set name when family is selected
+  * export: Raise an error when trying to export ipset for both families
+  * Merge remote-tracking branch 'origin/geofeed'
+  * importer: Drop method to import routing information from route
+    servers
+  * importer: Silently ignore any empty country codes in Geofeeds
+  * importer: Convert country codes to uppercase from Geofeeds
+  * importer: Skip lines we cannot decode
+  * importer: Silence invalid country code warning
+  * importer: Catch TimeoutError when loading Geofeeds
+  * importer: Log any errors to the database
+  * geofeeds: Delete any data on 404
+  * geofeeds: Delete any data that did not update within two weeks
+  * geofeeds: Catch any invalid URLs
+  * database: Log query execution time in debug mode
+  * importer: Improve performance of AS name export query
+  * geofeed: Parse and normalize any URLs
+  * importer: AWS: Add country code of NZ for ap-southeast-5
+  * importer: Don't write AS without names into the database
+  * importer: Decrease the log level if Spamhaus' files are empty
+  * tree: Add flag to delete nodes
+  * writer: Cleanup networks before writing
+  * tree: Actually delete any deleted nodes
+  * Merge networks before writing the database
+  * networks: Delete networks from the tree on merge
+  * tree: More elegantly prevent deleting the root node
+  * network: Decreate log level when deleting networks
+  * data: Update database to 2023-07-31
+  * configure: Bump version to 0.9.17
+
+  [ Temuri Doghonadze ]
+  * po: Add Georgian translation
+
+  [ Hans-Christoph Steiner ]
+  * Add bash-completion file for the location command.
+
+  [ Stefan Schantl ]
+  * Install bash-completion files.
+
+  [ Petr Písař ]
+  * Fix string escaping in location tool
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Mon, 31 Jul 2023 16:59:38 +0000
+
+libloc (0.9.16-1) unstable; urgency=medium
+
+  [ Peter Müller ]
+  * location-importer.in: Conduct sanity checks per DROP list
+  * location-importer.in: Add new Amazon region codes
+
+  [ Michael Tremer ]
+  * importer: Fix potential SQL command injection
+  * configure: Fix incorrect database path
+  * python: Export __version__ in location module
+  * writer: Add an empty string to the stringpool
+  * export: Fix generating file names for ipset output
+  * database: Ship a recent default database
+  * tests: Drop the test database and use the distributed one
+  * database: Correct error code on verification
+  * writer: Fix typo in signature variable
+  * writer: Assign correct file descriptor for private keys
+  * database: Fix check if a signature is set
+  * configure: Drop superfluous bracket
+  * configure: Bump version to 0.9.16
+
+  [ Petr Písař ]
+  * Move location manual from section 8 to section 1
+  * Remove shebangs from Python modules
+  * Move location manual from section 8 to section 1 in location-
+    update.service
+  * Install Perl files to Perl vendor directory
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Sat, 29 Oct 2022 13:25:36 +0000
+
+libloc (0.9.15-1) unstable; urgency=medium
+
+  [ Peter Müller ]
+  * Non-maintainer upload.
+  * location-importer.in: Fix dangling variable
+
+  [ Michael Tremer ]
+  * Replace strerror(errno) with %m in format string throughout
+  * Don't abuse errno as return code
+  * country: Refactor storing country code and continent code
+  * *_unref: Always expect a valid pointer
+  * cron: Add a cronjob if systemd is not available
+  * Check return value of fread() when reading header
+  * configure: Replace obsolete AC_PROG_CC_STDC macro
+  * writer: Check if stringpool has been initialized before free
+  * database: Use MAP_PRIVATE with mmap()
+  * database: Do not try to unmap failed mappings
+  * database: Log any errors when mmap() fails
+  * database: Increase page size to 64k
+  * python: Correctly raise any errors when opening the database
+  * database: Improve error reporting when the magic cannot be read
+  * python: Fix errors for Database.lookup()
+  * stringpool: Implement mmap as optional
+  * database: Fall back when mmap() isn't available
+  * tests: Add some simple database tests
+  * stringpool: Drop function to find next offset
+  * database: country: Return better error codes
+  * python: Export DatabaseEnumerator type
+  * tests: database: Expand test coverage
+  * database: Refactor error handling on create
+  * database: Break opening procedure into smaller parts
+  * database: Refactor checking magic
+  * database: Check if this version of libloc supports the database
+    format
+  * database: Map the entire database into memory as a whole
+  * database: Read header from mapped data
+  * hexdump: Don't try to dump any empty memory
+  * database: Read all data from the large mmap()
+  * database: Call madvise() to tell the kernel that we will randomly
+    access the data
+  * database: Encourage the compiler to inline some functions
+  * database: Drop unused offset variable in objects
+  * database: Drop debug line
+  * database: Initialize r on create
+  * tests: Add signing key to verify signatures
+  * configure: Check for madvise
+  * Fix compilation on MacOS X
+  * country: Drop unused CC_LEN
+  * tests: country: Don't crash when a country could not be found
+  * Revert "database: Increase page size to 64k"
+  * writer: Flush everything to disk after writing finishes
+  * configure: Make the default database path configurable
+  * python: Add new open() interface to easily open a database
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Mon, 26 Sep 2022 15:36:44 +0000
+
+libloc (0.9.14-1) unstable; urgency=medium
+
+  [ Michael Tremer ]
+  * Revert "configure: Require Python >= 3.9"
+  * Make sources around that we can run tests without location installed
+  * downloader: Fetch __version__ from C module
+  * export: Drop using functools
+  * verify: Show message on success
+  * export: Don't fail when output stream isn't seekable
+  * importer: Actually perform the Spamhaus sanity check
+  * importer: Change download behaviour
+  * importer: Move importing extended sources/ARIN into transaction
+  * python: database: Return None if no description/vendor/license set
+  * importer: Try to make parsing blocks faster
+  * importer: Import each source individually
+  * python: Fix missing bracket
+  * importer: Tolerate that data might exist from other RIRs
+  * importer: Import all sources in alphabetical order
+
+  [ Peter Müller ]
+  * location-importer: Only delete override data if we are sure to have
+    a valid replacement
+  * location-importer: AS names starting with "DNIC" actually are valid
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Sun, 14 Aug 2022 12:24:16 +0000
+
+libloc (0.9.13-1) unstable; urgency=medium
+
+  [ Michael Tremer ]
+  * tests: Add a simple test that lists all networks
+  * database: Allocate subnets list only once
+  * network: Log a more useful message on invalid prefix
+  * network: Add more debugging output when running exclude
+  * network: loc_network_subnets: Use correct prefix
+  * tests: Break after exporting 1000 networks
+  * configure: Require Python >= 3.9
+  * export: Enable flattening for everything
+  * .gitignore: Ignore *.db files only in main directory
+  * tests: Import test database
+  * configure: Bump version to 0.9.13
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Tue, 12 Apr 2022 12:15:34 +0000
+
+libloc (0.9.12-1) unstable; urgency=medium
+
+  [ Michael Tremer ]
+  * importer: Parse aggregated networks
+  * database: Return something when no filter criteria is configured
+  * importer: Correctly hande response codes from Bird
+  * importer: Silently ignore any table headers
+  * importer: Skip empty lines
+  * location: Fix output of list-* commands
+  * network: Move a couple of helper functions into headers
+  * network: Add function that counts the bit length of an addres
+  * network: Drop functions moved in an earlier commit
+  * network-list: Rewrite summarize algorithm
+  * network: Allow creating any valid networks
+  * network: Implement bit length function for IPv4
+  * addresses: Implement subtraction for IPv4
+  * bogons: Refactor algorithms
+  * network-list: Cap prefix length based on family
+  * address: Correctly subtract IPv4 addresses
+  * bogons: Reset after we have reached the end
+  * bogons: Don't consider a network legitimate without a country code
+  * Move all address convenience functions into their own header
+  * address: Rename in6_addr_cmp into loc_address_cmp
+  * address: Rename in6_addr_get_bit/in6_addr_set_bit to loc_address_*
+  * addresses: Use loc_address_family which is now available
+  * address: Rename increment/decrement functions and modify address in
+    place
+  * network: Pass prefix in native length
+  * strings: Statically allocate all address/network strings
+  * address: Initialize all bits of IP addresses
+  * address: Prevent under/overflow when incrementing/decrementing
+  * network-list: Simplify debugging output on summarize
+  * network-list: summarize: Break when we exhausted the network range
+  * network-list: Remove debugging line
+  * address: Simplify functions
+  * address: Fix decrementing IP addresses
+  * address: Fix buffer overwrite
+  * address: Add some simple tests
+  * bogons: Add gaps that are only one address wide
+  * bogons: Skip any subnets of former networks
+  * as-list: Grow faster to avoid too many re-allocations
+  * writer: Use AS list internally
+  * network-list: Grow just like the AS list
+  * country-list: Grow like AS list
+  * writer: Use country list internally
+  * importer: Improve performance of network export query
+  * writer: I forgot to initalize the country list
+  * Refactor parsing IP addresses
+  * address: Set default prefix if none is given
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Wed, 23 Mar 2022 20:11:29 +0000
+
+libloc (0.9.11-1) unstable; urgency=medium
+
+  [ Stefan Schantl ]
+  * export: Remove prefix when exporting countries.
+
+  [ Michael Tremer ]
+  * ipset: Optimise hash table size
+  * ipset: Fix hash type for IPv6
+  * ipset: Set maxelem to a fixed size
+  * export: Conditionally enable flattening
+  * location: Print proper error message for any uncaught exceptions
+  * export: Allow exporting to stdout
+  * ipset: The minimum hashsize is 64
+  * export: Fix filtering logic
+  * export: Sightly refactor export logic
+  * Bump release to 0.9.11
+
+  [ Peter Müller ]
+  * location-importer: Fix parsing LACNIC-flavoured inetnums
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Thu, 03 Mar 2022 10:44:44 +0000
+
+libloc (0.9.10-1) unstable; urgency=medium
+
+  [ Peter Müller ]
+  * Non-maintainer upload.
+  * location-importer: Set "is_drop" to "True" even in case of conflicts
+  * Process LACNIC geofeed as well
+  * location-importer: Improve regex for catching historic/orphaned data
+  * location-importer: Replace "UK" with "GB"
+  * location-importer.in: Add country code for AWS's "il-central-1" zone
+  * location-importer.in: Do not make things more complicated than they
+    are
+
+  [ Michael Tremer ]
+  * man: Add pages for top level functions
+  * man: Add man page for loc_database_new
+  * man: Add man pages for all loc_database_* functions
+  * export: Make ipset files easily reloadable
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Wed, 16 Feb 2022 08:53:48 +0000
+
+libloc (0.9.9-2) unstable; urgency=medium
+
+  * Fix broken Debian build
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Tue, 23 Nov 2021 11:07:22 +0000
+
+libloc (0.9.9-1) unstable; urgency=medium
+
+  [ Michael Tremer ]
+  * database: Make IP address const for lookup
+  * configure: Enable -fno-semantic-interposition by default
+  * network: Drop redundant loc_network_match_flag
+  * network: Drop useless loc_network_match_asn function
+  * stringpool: Make functions properly private
+  * Make loc_network_tree_* functions propertly private
+  * Remove LOC_EXPORT from
+    loc_network_to_database_v1/loc_network_new_from_database_v1
+  * country: Add function that returns flags for special country
+  * country: Make country codes beginning with X invalid
+  * network: Make loc_network_match_country_code match special countries
+  * network: Rename "match" functions to "matches"
+
+  [ Peter Müller ]
+  * location.txt: Improve manpage
+  * importer.py: Import JPNIC feed as well
+  * location-importer: Introduce auxiliary function to sanitise ASNs
+  * location-importer.in: Add Spamhaus DROP lists
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Sat, 20 Nov 2021 15:12:28 +0000
+
+libloc (0.9.8-1) unstable; urgency=medium
+
+  [ Michael Tremer ]
+  * importer: Do not try to initialise a column that cannot be NULL with
+    NULL
+  * configure: Add option to enable GCC's -fanalyzer
+  * writer: Break when a network cound not be allocated
+  * stringpool: Allow adding empty strings
+  * stringpool: Do not call strlen() on potential NULL pointer
+  * stringpool: Slightly refactor initialization to help the compiler
+    understand
+  * stringpool: Avoid memory leak if mmap() fails
+  * network: Move some helper functions into network.h
+  * python: Permit passing family to database enumerator
+  * location: Implement listing bogons
+  * Move include files to /usr/include/libloc
+
+  [ Peter Müller ]
+  * location-importer.in: Attempt to provide meaningful AS names if
+    organisation handles are missing
+  * location-importer.in: Braindead me accidentally forgot a "break"
+    statement
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Tue, 21 Sep 2021 10:29:11 +0000
+
+libloc (0.9.7-1) unstable; urgency=medium
+
+  [ Valters Jansons ]
+  * po: Update translations
+  * systemd: Add Documentation= to location-update
+
+  [ Peter Müller ]
+  * location-importer.in: emit warnings due to unknown country code for
+    valid networks only
+  * location.in: fix search_networks() function call
+  * location-importer.in: keep track of sources for networks, ASNs, and
+    organisations
+  * importer.py: add source information for RIR data feeds
+  * location-importer.in: track original countries as well
+  * location-importer.in: track original countries more pythonic
+  * Implement an additional flag for hostile networks safe to drop
+  * location-importer.in: Import (technical) AS names from ARIN
+  * location-importer.in: add source column for overrides as well
+  * location-importer.in: import additional IP information for Amazon
+    AWS IP networks
+  * location-import.in: optimise regular expression for filtering ASN
+    allocations to other RIRs when parsing ARIN AS names file
+
+  [ Michael Tremer ]
+  * countries: Fix matching invalid country codes
+  * Bump version to 0.9.7
+
+ -- Michael Tremer <michael.tremer@ipfire.org>  Fri, 09 Jul 2021 17:16:59 +0000
+
 libloc (0.9.6-1) unstable; urgency=medium
 
-  * location-importer.in: skip networks with unknown country codes
-  * location-importer.in: process unaligned IP ranges in RIR data files
-    correctly
+  [ Michael Tremer ]
+  * location: Fix list-networks-by-as
   * database: Free mmapped countries section
-  * location-importer.in: reduce log noise for unusable networks
-  * location-importer.in: delete 6to4 IPv6 space as well
+
+  [ Peter Müller ]
   * location-importer.in: fix typo
-  * location: Fix list-networks-by-as
+  * location-importer.in: delete 6to4 IPv6 space as well
+  * location-importer.in: reduce log noise for unusable networks
+  * location-importer.in: process unaligned IP ranges in RIR data files
+    correctly
+  * location-importer.in: skip networks with unknown country codes
 
  -- Michael Tremer <michael.tremer@ipfire.org>  Wed, 31 Mar 2021 14:06:00 +0100
 
diff --git a/debian/compat b/debian/compat
deleted file mode 100644 (file)
index f599e28..0000000
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
index 4b1407adb837b8b81c4647f9046e61185e89c544..918c0f6aef940fabe4a83c6211b2d52848663d5a 100644 (file)
@@ -1,40 +1,40 @@
 Source: libloc
 Maintainer: Stefan Schantl <stefan.schantl@ipfire.org>
-Section: misc
+Section: net
 Priority: optional
-Standards-Version: 4.3.0
+Standards-Version: 4.6.1
 Build-Depends:
- debhelper (>= 11),
- dh-python <!nopython>,
- asciidoc <!nodoc>,
- intltool (>=0.40.0),
- libpython3-dev <!nopython>,
+ debhelper-compat (= 13),
+ dh-sequence-python3,
+ asciidoc,
+ intltool,
  libssl-dev,
  libsystemd-dev,
- python3-dev:any <!nopython>,
  pkg-config,
+ python3-all-dev,
  systemd,
- xsltproc <!nodoc>,
- docbook-xsl <!nodoc>,
- git,
+ xsltproc,
+ docbook-xsl,
 Rules-Requires-Root: no
 Homepage: https://location.ipfire.org/
-Vcs-Git: https://git.ipfire.org/pub/git/location/libloc.git
-Vcs-Browser: https://git.ipfire.org/pub/git/location/libloc.git
+Vcs-Git: https://salsa.debian.org/debian/libloc.git
+Vcs-Browser: https://salsa.debian.org/debian/libloc
+Description: IP geolocation query library
+ libloc is a lightweight library to query the IPFire Location database and
+ determine the location of someone else on the Internet based on their IP
+ address.
 
 Package: libloc1
 Architecture: any
 Section: libs
-Pre-Depends:
- ${misc:Pre-Depends}
 Depends:
  ${shlibs:Depends},
- ${misc:Depends}
-Recommends:
- location (= ${binary:Version})
+ ${misc:Depends},
 Multi-Arch: same
-Description: Location library
- A library to determine the location of someone on the Internet
+Description: ${source:Synopsis}
+ ${source:Extended-Description}
+ .
+ This package provides the shared library.
 
 Package: libloc-dev
 Architecture: any
@@ -42,46 +42,59 @@ Section: libdevel
 Depends:
  libloc1 (= ${binary:Version}),
  ${misc:Depends},
-Suggests:
- pkg-config
 Multi-Arch: same
-Description: Development files for libloc
- Install this package if you wish to develop your own programs using
- libloc.
+Description: ${source:Synopsis} (development files)
+ ${source:Extended-Description}
+ .
+ This package provides the headers and development files needed to use libloc
+ in your own programs.
 
 Package: location
-Architecture: any
-Pre-Depends:
- ${misc:Pre-Depends}
-Depends:
- location-python (= ${binary:Version}),
- ${misc:Depends},
- ${python3:Depends}
-Multi-Arch: same
-Description: CLI utilities for libloc
- Commands to determine someone's location on the Internet
-
-Package: location-importer
-Architecture: any
-Pre-Depends:
- ${misc:Pre-Depends}
+Architecture: all
 Depends:
- location-python (= ${binary:Version}),
+ python3-location,
  ${misc:Depends},
- ${python3:Depends}
-Multi-Arch: foreign
-Description: Tools to author location databases
- This package contains tools that are required to build location databases
+ ${python3:Depends},
+Recommends:
+ libloc-database,
+Replaces: location-importer (<< 0.9.14-1~)
+Breaks: location-importer (<< 0.9.14-1~)
+Description: ${source:Synopsis} (CLI utilities)
+ ${source:Extended-Description}
+ .
+ This package provides CLI utilities based on libloc.
 
-Package: location-python
+Package: python3-location
 Architecture: any
 Section: python
-Pre-Depends:
- ${misc:Pre-Depends}
 Depends:
  ${misc:Depends},
  ${python3:Depends},
- ${shlibs:Depends}
+ ${shlibs:Depends},
+ python3-psycopg2,
+Replaces:
+ location-python (<< 0.9.14-1~),
+Breaks:
+ location-python (<< 0.9.14-1~),
+ location-importer (<< 0.9.14-1~),
 Multi-Arch: foreign
-Description: Python modules for libloc
- This package contains Python bindings for libloc
+Description: ${source:Synopsis} (Python 3 bindings)
+ ${source:Extended-Description}
+ .
+ This package provides the Python 3 bindings for libloc.
+
+Package: location-python
+Depends: python3-location, ${misc:Depends}
+Architecture: all
+Priority: optional
+Section: oldlibs
+Description: transitional package
+ This is a transitional package. It can safely be removed.
+
+Package: location-importer
+Depends: location, ${misc:Depends}
+Architecture: all
+Priority: optional
+Section: oldlibs
+Description: transitional package
+ This is a transitional package. It can safely be removed.
diff --git a/debian/copyright b/debian/copyright
index 3bd76541892a08204e03f1fe4ccbe9abc903d5f9..2877361355a7b2eac8391d0f7c5587ad0cb049c3 100644 (file)
@@ -4,14 +4,516 @@ Upstream-Contact: Michael Tremer <michael.tremer@ipfire.org>
 Source: https://location.ipfire.org/download
 
 Files: *
-Copyright: 2017-2019 IPFire Development team <info@ipfire.org>
-License: LGPL-2.1
+Copyright: 2017-2022, IPFire Development Team <info@ipfire.org>
+License: LGPL-2.1+
+
+Files: m4/*
+       src/test-address.c
+       src/test-as.c
+       src/test-country.c
+       src/test-database.c
+       src/test-libloc.c
+       src/test-network-list.c
+       src/test-network.c
+       src/test-signature.c
+       src/test-stringpool.c
+Copyright: 2006-2008, Diego Pettenò <flameeyes@gmail.com>
+           2017-2022, IPFire Development Team <info@ipfire.org>
+           2012, Lucas De Marchi <lucas.de.marchi@gmail.com>
+           2006-2008, xine project
+License: GPL-2+
+
+Files: src/perl/lib/*
+Copyright: 2019, Stefan Schantl
+License: Artistic-or-GPL
+
+Files: m4/ax_prog_perl_modules.m4
+Copyright: 2009, Dean Povey <povey@wedgetail.com>
+License: FSFAP
+
+Files: m4/ld-version-script.m4
+Copyright: 2008-2015, Free Software Foundation, Inc
+License: FSFULLR
+
+Files: tests/data/*
+Copyright: 2017-2022, IPFire Development Team <info@ipfire.org>
+License: CC-BY-SA-4
 
 Files: debian/*
-Copyright: 2019 Stefan Schantl <stefan.schantl@ipfire.org>
-License: LGPL-2.1
+Copyright: 2022, Jochen Sprickerhof <jspricke@debian.org>
+           2019, Stefan Schantl <stefan.schantl@ipfire.org>
+License: LGPL-2.1+
+
+License: Artistic-or-GPL
+ This library is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself, either Perl version 5.28.1 or,
+ at your option, any later version of Perl 5 you may have available.
+ .
+ On Debian GNU/Linux systems, the complete text of the GNU General
+ Public License can be found in '/usr/share/common-licenses/GPL' and
+ the Artistic Licence in '/usr/share/common-licenses/Artistic'.
 
-License: LGPL-2.1
+License: CC-BY-SA-4
+ http://creativecommons.org/licenses/by-sa/4.0/
+ .
+ Attribution-ShareAlike 4.0 International
+ .
+ =======================================================================
+ .
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
+ does not provide legal services or legal advice. Distribution of
+ Creative Commons public licenses does not create a lawyer-client or
+ other relationship. Creative Commons makes its licenses and related
+ information available on an "as-is" basis. Creative Commons gives no
+ warranties regarding its licenses, any material licensed under their
+ terms and conditions, or any related information. Creative Commons
+ disclaims all liability for damages resulting from their use to the
+ fullest extent possible.
+ .
+ Using Creative Commons Public Licenses
+ .
+ Creative Commons public licenses provide a standard set of terms and
+ conditions that creators and other rights holders may use to share
+ original works of authorship and other material subject to copyright
+ and certain other rights specified in the public license below. The
+ following considerations are for informational purposes only, are not
+ exhaustive, and do not form part of our licenses.
+ .
+      Considerations for licensors: Our public licenses are
+      intended for use by those authorized to give the public
+      permission to use material in ways otherwise restricted by
+      copyright and certain other rights. Our licenses are
+      irrevocable. Licensors should read and understand the terms
+      and conditions of the license they choose before applying it.
+      Licensors should also secure all rights necessary before
+      applying our licenses so that the public can reuse the
+      material as expected. Licensors should clearly mark any
+      material not subject to the license. This includes other CC-
+      licensed material, or material used under an exception or
+      limitation to copyright. More considerations for licensors:
+        wiki.creativecommons.org/Considerations_for_licensors
+ .
+      Considerations for the public: By using one of our public
+      licenses, a licensor grants the public permission to use the
+      licensed material under specified terms and conditions. If
+      the licensor's permission is not necessary for any reason--for
+      example, because of any applicable exception or limitation to
+      copyright--then that use is not regulated by the license. Our
+      licenses grant only permissions under copyright and certain
+      other rights that a licensor has authority to grant. Use of
+      the licensed material may still be restricted for other
+      reasons, including because others have copyright or other
+      rights in the material. A licensor may make special requests,
+      such as asking that all changes be marked or described.
+      Although not required by our licenses, you are encouraged to
+      respect those requests where reasonable. More_considerations
+      for the public:
+         wiki.creativecommons.org/Considerations_for_licensees
+ .
+ =======================================================================
+ .
+ Creative Commons Attribution-ShareAlike 4.0 International Public
+ License
+ .
+ By exercising the Licensed Rights (defined below), You accept and agree
+ to be bound by the terms and conditions of this Creative Commons
+ Attribution-ShareAlike 4.0 International Public License ("Public
+ License"). To the extent this Public License may be interpreted as a
+ contract, You are granted the Licensed Rights in consideration of Your
+ acceptance of these terms and conditions, and the Licensor grants You
+ such rights in consideration of benefits the Licensor receives from
+ making the Licensed Material available under these terms and
+ conditions.
+ .
+ .
+ Section 1 -- Definitions.
+ .
+   a. Adapted Material means material subject to Copyright and Similar
+      Rights that is derived from or based upon the Licensed Material
+      and in which the Licensed Material is translated, altered,
+      arranged, transformed, or otherwise modified in a manner requiring
+      permission under the Copyright and Similar Rights held by the
+      Licensor. For purposes of this Public License, where the Licensed
+      Material is a musical work, performance, or sound recording,
+      Adapted Material is always produced where the Licensed Material is
+      synched in timed relation with a moving image.
+ .
+   b. Adapter's License means the license You apply to Your Copyright
+      and Similar Rights in Your contributions to Adapted Material in
+      accordance with the terms and conditions of this Public License.
+ .
+   c. BY-SA Compatible License means a license listed at
+      creativecommons.org/compatiblelicenses, approved by Creative
+      Commons as essentially the equivalent of this Public License.
+ .
+   d. Copyright and Similar Rights means copyright and/or similar rights
+      closely related to copyright including, without limitation,
+      performance, broadcast, sound recording, and Sui Generis Database
+      Rights, without regard to how the rights are labeled or
+      categorized. For purposes of this Public License, the rights
+      specified in Section 2(b)(1)-(2) are not Copyright and Similar
+      Rights.
+ .
+   e. Effective Technological Measures means those measures that, in the
+      absence of proper authority, may not be circumvented under laws
+      fulfilling obligations under Article 11 of the WIPO Copyright
+      Treaty adopted on December 20, 1996, and/or similar international
+      agreements.
+ .
+   f. Exceptions and Limitations means fair use, fair dealing, and/or
+      any other exception or limitation to Copyright and Similar Rights
+      that applies to Your use of the Licensed Material.
+ .
+   g. License Elements means the license attributes listed in the name
+      of a Creative Commons Public License. The License Elements of this
+      Public License are Attribution and ShareAlike.
+ .
+   h. Licensed Material means the artistic or literary work, database,
+      or other material to which the Licensor applied this Public
+      License.
+ .
+   i. Licensed Rights means the rights granted to You subject to the
+      terms and conditions of this Public License, which are limited to
+      all Copyright and Similar Rights that apply to Your use of the
+      Licensed Material and that the Licensor has authority to license.
+ .
+   j. Licensor means the individual(s) or entity(ies) granting rights
+      under this Public License.
+ .
+   k. Share means to provide material to the public by any means or
+      process that requires permission under the Licensed Rights, such
+      as reproduction, public display, public performance, distribution,
+      dissemination, communication, or importation, and to make material
+      available to the public including in ways that members of the
+      public may access the material from a place and at a time
+      individually chosen by them.
+ .
+   l. Sui Generis Database Rights means rights other than copyright
+      resulting from Directive 96/9/EC of the European Parliament and of
+      the Council of 11 March 1996 on the legal protection of databases,
+      as amended and/or succeeded, as well as other essentially
+      equivalent rights anywhere in the world.
+ .
+   m. You means the individual or entity exercising the Licensed Rights
+      under this Public License. Your has a corresponding meaning.
+ .
+ .
+ Section 2 -- Scope.
+ .
+   a. License grant.
+ .
+        1. Subject to the terms and conditions of this Public License,
+           the Licensor hereby grants You a worldwide, royalty-free,
+           non-sublicensable, non-exclusive, irrevocable license to
+           exercise the Licensed Rights in the Licensed Material to:
+ .
+             a. reproduce and Share the Licensed Material, in whole or
+                in part; and
+ .
+             b. produce, reproduce, and Share Adapted Material.
+ .
+        2. Exceptions and Limitations. For the avoidance of doubt, where
+           Exceptions and Limitations apply to Your use, this Public
+           License does not apply, and You do not need to comply with
+           its terms and conditions.
+ .
+        3. Term. The term of this Public License is specified in Section
+           6(a).
+ .
+        4. Media and formats; technical modifications allowed. The
+           Licensor authorizes You to exercise the Licensed Rights in
+           all media and formats whether now known or hereafter created,
+           and to make technical modifications necessary to do so. The
+           Licensor waives and/or agrees not to assert any right or
+           authority to forbid You from making technical modifications
+           necessary to exercise the Licensed Rights, including
+           technical modifications necessary to circumvent Effective
+           Technological Measures. For purposes of this Public License,
+           simply making modifications authorized by this Section 2(a)
+           (4) never produces Adapted Material.
+ .
+        5. Downstream recipients.
+ .
+             a. Offer from the Licensor -- Licensed Material. Every
+                recipient of the Licensed Material automatically
+                receives an offer from the Licensor to exercise the
+                Licensed Rights under the terms and conditions of this
+                Public License.
+ .
+             b. Additional offer from the Licensor -- Adapted Material.
+                Every recipient of Adapted Material from You
+                automatically receives an offer from the Licensor to
+                exercise the Licensed Rights in the Adapted Material
+                under the conditions of the Adapter's License You apply.
+ .
+             c. No downstream restrictions. You may not offer or impose
+                any additional or different terms or conditions on, or
+                apply any Effective Technological Measures to, the
+                Licensed Material if doing so restricts exercise of the
+                Licensed Rights by any recipient of the Licensed
+                Material.
+ .
+        6. No endorsement. Nothing in this Public License constitutes or
+           may be construed as permission to assert or imply that You
+           are, or that Your use of the Licensed Material is, connected
+           with, or sponsored, endorsed, or granted official status by,
+           the Licensor or others designated to receive attribution as
+           provided in Section 3(a)(1)(A)(i).
+ .
+   b. Other rights.
+ .
+        1. Moral rights, such as the right of integrity, are not
+           licensed under this Public License, nor are publicity,
+           privacy, and/or other similar personality rights; however, to
+           the extent possible, the Licensor waives and/or agrees not to
+           assert any such rights held by the Licensor to the limited
+           extent necessary to allow You to exercise the Licensed
+           Rights, but not otherwise.
+ .
+        2. Patent and trademark rights are not licensed under this
+           Public License.
+ .
+        3. To the extent possible, the Licensor waives any right to
+           collect royalties from You for the exercise of the Licensed
+           Rights, whether directly or through a collecting society
+           under any voluntary or waivable statutory or compulsory
+           licensing scheme. In all other cases the Licensor expressly
+           reserves any right to collect such royalties.
+ .
+ .
+ Section 3 -- License Conditions.
+ .
+ Your exercise of the Licensed Rights is expressly made subject to the
+ following conditions.
+ .
+   a. Attribution.
+ .
+        1. If You Share the Licensed Material (including in modified
+           form), You must:
+ .
+             a. retain the following if it is supplied by the Licensor
+                with the Licensed Material:
+ .
+                  i. identification of the creator(s) of the Licensed
+                     Material and any others designated to receive
+                     attribution, in any reasonable manner requested by
+                     the Licensor (including by pseudonym if
+                     designated);
+ .
+                 ii. a copyright notice;
+ .
+                iii. a notice that refers to this Public License;
+ .
+                 iv. a notice that refers to the disclaimer of
+                     warranties;
+ .
+                  v. a URI or hyperlink to the Licensed Material to the
+                     extent reasonably practicable;
+ .
+             b. indicate if You modified the Licensed Material and
+                retain an indication of any previous modifications; and
+ .
+             c. indicate the Licensed Material is licensed under this
+                Public License, and include the text of, or the URI or
+                hyperlink to, this Public License.
+ .
+        2. You may satisfy the conditions in Section 3(a)(1) in any
+           reasonable manner based on the medium, means, and context in
+           which You Share the Licensed Material. For example, it may be
+           reasonable to satisfy the conditions by providing a URI or
+           hyperlink to a resource that includes the required
+           information.
+ .
+        3. If requested by the Licensor, You must remove any of the
+           information required by Section 3(a)(1)(A) to the extent
+           reasonably practicable.
+ .
+   b. ShareAlike.
+ .
+      In addition to the conditions in Section 3(a), if You Share
+      Adapted Material You produce, the following conditions also apply.
+ .
+        1. The Adapter's License You apply must be a Creative Commons
+           license with the same License Elements, this version or
+           later, or a BY-SA Compatible License.
+ .
+        2. You must include the text of, or the URI or hyperlink to, the
+           Adapter's License You apply. You may satisfy this condition
+           in any reasonable manner based on the medium, means, and
+           context in which You Share Adapted Material.
+ .
+        3. You may not offer or impose any additional or different terms
+           or conditions on, or apply any Effective Technological
+           Measures to, Adapted Material that restrict exercise of the
+           rights granted under the Adapter's License You apply.
+ .
+ .
+ Section 4 -- Sui Generis Database Rights.
+ .
+ Where the Licensed Rights include Sui Generis Database Rights that
+ apply to Your use of the Licensed Material:
+ .
+   a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+      to extract, reuse, reproduce, and Share all or a substantial
+      portion of the contents of the database;
+ .
+   b. if You include all or a substantial portion of the database
+      contents in a database in which You have Sui Generis Database
+      Rights, then the database in which You have Sui Generis Database
+      Rights (but not its individual contents) is Adapted Material,
+ .
+      including for purposes of Section 3(b); and
+   c. You must comply with the conditions in Section 3(a) if You Share
+      all or a substantial portion of the contents of the database.
+ .
+ For the avoidance of doubt, this Section 4 supplements and does not
+ replace Your obligations under this Public License where the Licensed
+ Rights include other Copyright and Similar Rights.
+ .
+ .
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+ .
+   a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+      EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+      AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+      ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+      IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+      WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+      PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+      ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+      KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+      ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+ .
+   b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+      TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+      NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+      INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+      COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+      USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+      ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+      DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+      IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+ .
+   c. The disclaimer of warranties and limitation of liability provided
+      above shall be interpreted in a manner that, to the extent
+      possible, most closely approximates an absolute disclaimer and
+      waiver of all liability.
+ .
+ .
+ Section 6 -- Term and Termination.
+ .
+   a. This Public License applies for the term of the Copyright and
+      Similar Rights licensed here. However, if You fail to comply with
+      this Public License, then Your rights under this Public License
+      terminate automatically.
+ .
+   b. Where Your right to use the Licensed Material has terminated under
+      Section 6(a), it reinstates:
+ .
+        1. automatically as of the date the violation is cured, provided
+           it is cured within 30 days of Your discovery of the
+           violation; or
+ .
+        2. upon express reinstatement by the Licensor.
+ .
+      For the avoidance of doubt, this Section 6(b) does not affect any
+      right the Licensor may have to seek remedies for Your violations
+      of this Public License.
+ .
+   c. For the avoidance of doubt, the Licensor may also offer the
+      Licensed Material under separate terms or conditions or stop
+      distributing the Licensed Material at any time; however, doing so
+      will not terminate this Public License.
+ .
+   d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+      License.
+ .
+ .
+ Section 7 -- Other Terms and Conditions.
+ .
+   a. The Licensor shall not be bound by any additional or different
+      terms or conditions communicated by You unless expressly agreed.
+ .
+   b. Any arrangements, understandings, or agreements regarding the
+      Licensed Material not stated herein are separate from and
+      independent of the terms and conditions of this Public License.
+ .
+ .
+ Section 8 -- Interpretation.
+ .
+   a. For the avoidance of doubt, this Public License does not, and
+      shall not be interpreted to, reduce, limit, restrict, or impose
+      conditions on any use of the Licensed Material that could lawfully
+      be made without permission under this Public License.
+ .
+   b. To the extent possible, if any provision of this Public License is
+      deemed unenforceable, it shall be automatically reformed to the
+      minimum extent necessary to make it enforceable. If the provision
+      cannot be reformed, it shall be severed from this Public License
+      without affecting the enforceability of the remaining terms and
+      conditions.
+ .
+   c. No term or condition of this Public License will be waived and no
+      failure to comply consented to unless expressly agreed to by the
+      Licensor.
+ .
+   d. Nothing in this Public License constitutes or may be interpreted
+      as a limitation upon, or waiver of, any privileges and immunities
+      that apply to the Licensor or You, including from the legal
+      processes of any jurisdiction or authority.
+ .
+ .
+ =======================================================================
+ .
+ Creative Commons is not a party to its public
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
+ its public licenses to material it publishes and in those instances
+ will be considered the “Licensor.” The text of the Creative Commons
+ public licenses is dedicated to the public domain under the CC0 Public
+ Domain Dedication. Except for the limited purpose of indicating that
+ material is shared under a Creative Commons public license or as
+ otherwise permitted by the Creative Commons policies published at
+ creativecommons.org/policies, Creative Commons does not authorize the
+ use of the trademark "Creative Commons" or any other trademark or logo
+ of Creative Commons without its prior written consent including,
+ without limitation, in connection with any unauthorized modifications
+ to any of its public licenses or any other arrangements,
+ understandings, or agreements concerning use of licensed material. For
+ the avoidance of doubt, this paragraph does not form part of the
+ public licenses.
+ .
+ Creative Commons may be contacted at creativecommons.org.
+
+License: FSFAP
+  Copying and distribution of this file, with or without modification, are
+  permitted in any medium without royalty provided the copyright notice
+  and this notice are preserved.  This file is offered as-is, without any
+  warranty.
+
+License: FSFULLR
+ This file is free software; the Free Software Foundation gives
+ unlimited permission to copy and/or distribute it, with or without
+ modifications, as long as this notice is preserved.
+
+License: GPL-2+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ .
+ On Debian systems, the complete text of the GNU General Public
+ License version 2 can be found in `/usr/share/common-licenses/GPL-2'.
+
+License: LGPL-2.1+
  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by the
  Free Software Foundation; version 2.1 of the License, or (at
@@ -23,4 +525,3 @@ License: LGPL-2.1
  .
  The complete text of the GNU General Public License
  can be found in /usr/share/common-licenses/LGPL-2.1 file.
- .
diff --git a/debian/genchangelog.sh b/debian/genchangelog.sh
new file mode 100755 (executable)
index 0000000..ab1c198
--- /dev/null
@@ -0,0 +1,38 @@
+#!/bin/bash -e
+gitshow () {
+  local format=$1
+  local commit=$2
+
+  git show --no-patch --format=format:"$format" "$commit"
+}
+
+main () {
+  if [ $# -lt 1 ]; then
+    local bn="$(basename $0)"
+    echo "Usage:    $bn  <commit range>" >&2
+    echo "Example:  $bn  0.9.7..HEAD" >&2
+    echo "Example:  $bn  0.9.5..0.9.6^" >&2
+    return 1
+  fi
+
+  local commitrange=$1
+
+  local commit
+  for commit in $(git rev-list --reverse "$commitrange"); do
+    # Skip commits with diffs that only have Makefile.am or d/ changes.
+    if [ "$(git diff --name-only "${commit}^..${commit}" -- . ':^Makefile.am' ':^debian/' | wc -l)" == 0 ]; then
+      continue
+    fi
+
+    local author_name="$(gitshow %an "$commit")"
+    local author_email="$(gitshow %ae "$commit")"
+    local subject="$(gitshow %s "$commit")"
+
+    echo "$author_name <$author_email>  $subject"
+    DEBFULLNAME="$author_name" DEBEMAIL="$author_email" debchange --upstream --multimaint-merge "$subject"
+  done
+
+  debchange --release ''
+}
+
+main "$@" || exit $?
index d93d217383a63c1326278273e1bfa1a5d5df4aa4..04e85faa5a9e0aa93148d3ce1d2010ad208d7515 100644 (file)
@@ -1,3 +1,4 @@
 usr/include/libloc
 usr/lib/*/libloc.so
 usr/lib/*/pkgconfig
+usr/share/man/man3
index 0f8eec4d780810da83ed155ef57103035ee4620d..e6cb2ac11aa264f8f9a88fe43aec1092b523df14 100644 (file)
@@ -1 +1,2 @@
 usr/lib/*/libloc.so.*
+usr/share/locale/*/LC_MESSAGES/libloc.mo
index 74b70b599c752d08b7dfa83c38ead9e6b2cb8b37..37705357e8ad21fe27225c3362a27d67d92218d6 100644 (file)
@@ -1,7 +1,6 @@
 libloc.so.1 libloc1 #MINVER#
 * Build-Depends-Package: libloc-dev
  LIBLOC_1@LIBLOC_1 0.9.4
- LIBLOC_PRIVATE@LIBLOC_PRIVATE 0.9.4
  loc_as_cmp@LIBLOC_1 0.9.4
  loc_as_get_name@LIBLOC_1 0.9.4
  loc_as_get_number@LIBLOC_1 0.9.4
@@ -14,6 +13,7 @@ libloc.so.1 libloc1 #MINVER#
  loc_as_list_new@LIBLOC_1 0.9.5
  loc_as_list_ref@LIBLOC_1 0.9.5
  loc_as_list_size@LIBLOC_1 0.9.5
+ loc_as_list_sort@LIBLOC_1 0.9.12
  loc_as_list_unref@LIBLOC_1 0.9.5
  loc_as_new@LIBLOC_1 0.9.4
  loc_as_ref@LIBLOC_1 0.9.4
@@ -33,11 +33,13 @@ libloc.so.1 libloc1 #MINVER#
  loc_country_list_new@LIBLOC_1 0.9.5
  loc_country_list_ref@LIBLOC_1 0.9.5
  loc_country_list_size@LIBLOC_1 0.9.5
+ loc_country_list_sort@LIBLOC_1 0.9.12
  loc_country_list_unref@LIBLOC_1 0.9.5
  loc_country_new@LIBLOC_1 0.9.4
  loc_country_ref@LIBLOC_1 0.9.4
  loc_country_set_continent_code@LIBLOC_1 0.9.4
  loc_country_set_name@LIBLOC_1 0.9.4
+ loc_country_special_code_to_flag@LIBLOC_1 0.9.8
  loc_country_unref@LIBLOC_1 0.9.4
  loc_database_count_as@LIBLOC_1 0.9.4
  loc_database_created_at@LIBLOC_1 0.9.4
@@ -92,10 +94,8 @@ libloc.so.1 libloc1 #MINVER#
  loc_network_list_ref@LIBLOC_1 0.9.5
  loc_network_list_size@LIBLOC_1 0.9.5
  loc_network_list_unref@LIBLOC_1 0.9.5
- loc_network_match_address@LIBLOC_1 0.9.5
- loc_network_match_asn@LIBLOC_1 0.9.4
- loc_network_match_country_code@LIBLOC_1 0.9.4
- loc_network_match_flag@LIBLOC_1 0.9.4
+ loc_network_matches_address@LIBLOC_1 0.9.8
+ loc_network_matches_country_code@LIBLOC_1 0.9.8
  loc_network_new@LIBLOC_1 0.9.4
  loc_network_new_from_string@LIBLOC_1 0.9.4
  loc_network_overlaps@LIBLOC_1 0.9.5
@@ -111,13 +111,6 @@ libloc.so.1 libloc1 #MINVER#
  loc_ref@LIBLOC_1 0.9.4
  loc_set_log_fn@LIBLOC_1 0.9.4
  loc_set_log_priority@LIBLOC_1 0.9.4
- loc_stringpool_add@LIBLOC_PRIVATE 0.9.4
- loc_stringpool_dump@LIBLOC_PRIVATE 0.9.4
- loc_stringpool_get@LIBLOC_PRIVATE 0.9.4
- loc_stringpool_get_size@LIBLOC_PRIVATE 0.9.4
- loc_stringpool_new@LIBLOC_PRIVATE 0.9.4
- loc_stringpool_ref@LIBLOC_PRIVATE 0.9.4
- loc_stringpool_unref@LIBLOC_PRIVATE 0.9.4
  loc_unref@LIBLOC_1 0.9.4
  loc_writer_add_as@LIBLOC_1 0.9.4
  loc_writer_add_country@LIBLOC_1 0.9.4
diff --git a/debian/location-importer.install b/debian/location-importer.install
deleted file mode 100644 (file)
index eaae79d..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-usr/bin/location-importer
-usr/lib/python3*/site-packages/location/database.py
-usr/lib/python3*/site-packages/location/importer.py
diff --git a/debian/location-perl.install b/debian/location-perl.install
deleted file mode 100644 (file)
index 08e8cc4..0000000
+++ /dev/null
@@ -1 +0,0 @@
-usr/lib/*/perl/
diff --git a/debian/location-python.install b/debian/location-python.install
deleted file mode 100644 (file)
index a6004ca..0000000
+++ /dev/null
@@ -1 +0,0 @@
-usr/lib/python3*/site-packages
index 25d9b6fcce2cbc89b36109bc9f2a79346ead5164..deb6c4d6e7c1c01bf8bf90abd171b25c779a5514 100644 (file)
@@ -1,4 +1,6 @@
-usr/bin/location
+usr/bin
+usr/share/bash-completion/completions/location
+var/lib/location/database.db
 var/lib/location/signing-key.pem
-src/systemd/*.service          /lib/systemd/system/
-src/systemd/*.timer            /lib/systemd/system/
+lib/systemd/system
+usr/share/man/man1
diff --git a/debian/location.manpages b/debian/location.manpages
deleted file mode 100644 (file)
index 3e662bb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-man/location.8
diff --git a/debian/location.postinst b/debian/location.postinst
new file mode 100644 (file)
index 0000000..913f39c
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+  configure)
+    mkdir -p /var/lib/location || true
+    ln -s /usr/share/libloc-location/location.db /var/lib/location/database.db 2>/dev/null || true
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/location.postrm b/debian/location.postrm
new file mode 100644 (file)
index 0000000..df1b03e
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+  purge)
+    rm -f /var/lib/location/database.db 2>/dev/null
+    rm -f /var/lib/location/signing-key.pem 2>/dev/null
+    rmdir /var/lib/location || true
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/python3-location.install b/debian/python3-location.install
new file mode 100644 (file)
index 0000000..4606faa
--- /dev/null
@@ -0,0 +1 @@
+usr/lib/python3*
index 05b88fd59b13eea96e8b1960a9b48c08dfe1f306..e5e3f18ea1ffa34e8a19cad72cc62fd964c39c73 100755 (executable)
@@ -1,28 +1,17 @@
 #!/usr/bin/make -f
 
-# enable verbose mode
-#export DH_VERBOSE=1
-
-# enable all hardening build flags
 export DEB_BUILD_MAINT_OPTIONS=hardening=+all
+export PYBUILD_SYSTEM=custom
+export PYBUILD_CLEAN_ARGS=dh_auto_clean
+export PYBUILD_CONFIGURE_ARGS=intltoolize --force --automake; \
+       PYTHON={interpreter} dh_auto_configure -- \
+       --disable-perl
+export PYBUILD_BUILD_ARGS=dh_auto_build
+export PYBUILD_INSTALL_ARGS=dh_auto_install --destdir={destdir}; \
+       mkdir -p {destdir}/usr/lib/python{version}/dist-packages; \
+       mv {destdir}/usr/lib/python3/dist-packages/_location.so {destdir}/usr/lib/python{version}/dist-packages/_location.so; \
+       rm -f {destdir}/usr/lib/python3/dist-packages/_location.la {destdir}/usr/lib/*/libloc.la
+export PYBUILD_TEST_ARGS=dh_auto_test
 
 %:
-       dh $@ --with python3 --with-systemd
-
-override_dh_auto_configure:
-       intltoolize --force --automake
-       dh_auto_configure -- --disable-perl
-
-override_dh_perl:
-       dh_perl -d
-
-override_dh_systemd_enable:
-       dh_systemd_enable location-update.timer
-
-override_dh_install:
-       dh_install
-       # lintian: unknown-file-in-python-module-directory
-       rm debian/location-python/usr/lib/python3*/site-packages/_location.la
-       # linitan: binaries-have-file-conflict (d/location-importer.install)
-       rm debian/location-python/usr/lib/python3*/site-packages/location/database.py
-       rm debian/location-python/usr/lib/python3*/site-packages/location/importer.py
+       dh $@ --buildsystem=pybuild
index 19ace6dcc091032e7050880823f2f34bbe564a8d..f466401edfdda518a06730c0af2bcc64d6adaa0d 100644 (file)
@@ -1,3 +1,3 @@
 version=4
 https://source.ipfire.org/releases/libloc/ \
-    @PACKAGE@@ANY_VERSION@@ARCHIVE_EXT@ debian uupdate
+    @PACKAGE@@ANY_VERSION@@ARCHIVE_EXT@
diff --git a/m4/attributes.m4 b/m4/attributes.m4
new file mode 100644 (file)
index 0000000..7e080da
--- /dev/null
@@ -0,0 +1,288 @@
+dnl Macros to check the presence of generic (non-typed) symbols.
+dnl Copyright (c) 2006-2008 Diego Pettenò <flameeyes@gmail.com>
+dnl Copyright (c) 2006-2008 xine project
+dnl Copyright (c) 2012 Lucas De Marchi <lucas.de.marchi@gmail.com>
+dnl
+dnl This program is free software; you can redistribute it and/or modify
+dnl it under the terms of the GNU General Public License as published by
+dnl the Free Software Foundation; either version 2, or (at your option)
+dnl any later version.
+dnl
+dnl This program is distributed in the hope that it will be useful,
+dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
+dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+dnl GNU General Public License for more details.
+dnl
+dnl You should have received a copy of the GNU General Public License
+dnl along with this program; if not, write to the Free Software
+dnl Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+dnl 02110-1301, USA.
+dnl
+dnl As a special exception, the copyright owners of the
+dnl macro gives unlimited permission to copy, distribute and modify the
+dnl configure scripts that are the output of Autoconf when processing the
+dnl Macro. You need not follow the terms of the GNU General Public
+dnl License when using or distributing such scripts, even though portions
+dnl of the text of the Macro appear in them. The GNU General Public
+dnl License (GPL) does govern all other use of the material that
+dnl constitutes the Autoconf Macro.
+dnl
+dnl This special exception to the GPL applies to versions of the
+dnl Autoconf Macro released by this project. When you make and
+dnl distribute a modified version of the Autoconf Macro, you may extend
+dnl this special exception to the GPL to apply to your modified version as
+dnl well.
+
+dnl Check if FLAG in ENV-VAR is supported by compiler and append it
+dnl to WHERE-TO-APPEND variable
+dnl CC_CHECK_FLAG_APPEND([WHERE-TO-APPEND], [ENV-VAR], [FLAG])
+
+AC_DEFUN([CC_CHECK_FLAG_APPEND], [
+  AC_CACHE_CHECK([if $CC supports flag $3 in envvar $2],
+                 AS_TR_SH([cc_cv_$2_$3]),
+          [eval "AS_TR_SH([cc_save_$2])='${$2}'"
+           eval "AS_TR_SH([$2])='-Werror $3'"
+           AC_COMPILE_IFELSE([AC_LANG_SOURCE([int a = 0; int main(void) { return a; } ])],
+                                    [eval "AS_TR_SH([cc_cv_$2_$3])='yes'"],
+                                    [eval "AS_TR_SH([cc_cv_$2_$3])='no'"])
+           eval "AS_TR_SH([$2])='$cc_save_$2'"])
+
+  AS_IF([eval test x$]AS_TR_SH([cc_cv_$2_$3])[ = xyes],
+        [eval "$1='${$1} $3'"])
+])
+
+dnl CC_CHECK_FLAGS_APPEND([WHERE-TO-APPEND], [ENV-VAR], [FLAG1 FLAG2])
+AC_DEFUN([CC_CHECK_FLAGS_APPEND], [
+  for flag in $3; do
+    CC_CHECK_FLAG_APPEND($1, $2, $flag)
+  done
+])
+
+dnl Check if the flag is supported by linker (cacheable)
+dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND])
+
+AC_DEFUN([CC_CHECK_LDFLAGS], [
+  AC_CACHE_CHECK([if $CC supports $1 flag],
+    AS_TR_SH([cc_cv_ldflags_$1]),
+    [ac_save_LDFLAGS="$LDFLAGS"
+     LDFLAGS="$LDFLAGS $1"
+     AC_LINK_IFELSE([int main() { return 1; }],
+       [eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"],
+       [eval "AS_TR_SH([cc_cv_ldflags_$1])="])
+     LDFLAGS="$ac_save_LDFLAGS"
+    ])
+
+  AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes],
+    [$2], [$3])
+])
+
+dnl define the LDFLAGS_NOUNDEFINED variable with the correct value for
+dnl the current linker to avoid undefined references in a shared object.
+AC_DEFUN([CC_NOUNDEFINED], [
+  dnl We check $host for which systems to enable this for.
+  AC_REQUIRE([AC_CANONICAL_HOST])
+
+  case $host in
+     dnl FreeBSD (et al.) does not complete linking for shared objects when pthreads
+     dnl are requested, as different implementations are present; to avoid problems
+     dnl use -Wl,-z,defs only for those platform not behaving this way.
+     *-freebsd* | *-openbsd*) ;;
+     *)
+        dnl First of all check for the --no-undefined variant of GNU ld. This allows
+        dnl for a much more readable commandline, so that people can understand what
+        dnl it does without going to look for what the heck -z defs does.
+        for possible_flags in "-Wl,--no-undefined" "-Wl,-z,defs"; do
+           CC_CHECK_LDFLAGS([$possible_flags], [LDFLAGS_NOUNDEFINED="$possible_flags"])
+           break
+        done
+     ;;
+  esac
+
+  AC_SUBST([LDFLAGS_NOUNDEFINED])
+])
+
+dnl Check for a -Werror flag or equivalent. -Werror is the GCC
+dnl and ICC flag that tells the compiler to treat all the warnings
+dnl as fatal. We usually need this option to make sure that some
+dnl constructs (like attributes) are not simply ignored.
+dnl
+dnl Other compilers don't support -Werror per se, but they support
+dnl an equivalent flag:
+dnl  - Sun Studio compiler supports -errwarn=%all
+AC_DEFUN([CC_CHECK_WERROR], [
+  AC_CACHE_CHECK(
+    [for $CC way to treat warnings as errors],
+    [cc_cv_werror],
+    [CC_CHECK_CFLAGS_SILENT([-Werror], [cc_cv_werror=-Werror],
+      [CC_CHECK_CFLAGS_SILENT([-errwarn=%all], [cc_cv_werror=-errwarn=%all])])
+    ])
+])
+
+AC_DEFUN([CC_CHECK_ATTRIBUTE], [
+  AC_REQUIRE([CC_CHECK_WERROR])
+  AC_CACHE_CHECK([if $CC supports __attribute__(( ifelse([$2], , [$1], [$2]) ))],
+    AS_TR_SH([cc_cv_attribute_$1]),
+    [ac_save_CFLAGS="$CFLAGS"
+     CFLAGS="$CFLAGS $cc_cv_werror"
+     AC_COMPILE_IFELSE([AC_LANG_SOURCE([$3])],
+       [eval "AS_TR_SH([cc_cv_attribute_$1])='yes'"],
+       [eval "AS_TR_SH([cc_cv_attribute_$1])='no'"])
+     CFLAGS="$ac_save_CFLAGS"
+    ])
+
+  AS_IF([eval test x$]AS_TR_SH([cc_cv_attribute_$1])[ = xyes],
+    [AC_DEFINE(
+       AS_TR_CPP([SUPPORT_ATTRIBUTE_$1]), 1,
+         [Define this if the compiler supports __attribute__(( ifelse([$2], , [$1], [$2]) ))]
+         )
+     $4],
+    [$5])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_CONSTRUCTOR], [
+  CC_CHECK_ATTRIBUTE(
+    [constructor],,
+    [void __attribute__((constructor)) ctor() { int a; }],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_FORMAT], [
+  CC_CHECK_ATTRIBUTE(
+    [format], [format(printf, n, n)],
+    [void __attribute__((format(printf, 1, 2))) printflike(const char *fmt, ...) { fmt = (void *)0; }],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_FORMAT_ARG], [
+  CC_CHECK_ATTRIBUTE(
+    [format_arg], [format_arg(printf)],
+    [char *__attribute__((format_arg(1))) gettextlike(const char *fmt) { fmt = (void *)0; }],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_VISIBILITY], [
+  CC_CHECK_ATTRIBUTE(
+    [visibility_$1], [visibility("$1")],
+    [void __attribute__((visibility("$1"))) $1_function() { }],
+    [$2], [$3])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_NONNULL], [
+  CC_CHECK_ATTRIBUTE(
+    [nonnull], [nonnull()],
+    [void __attribute__((nonnull())) some_function(void *foo, void *bar) { foo = (void*)0; bar = (void*)0; }],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_UNUSED], [
+  CC_CHECK_ATTRIBUTE(
+    [unused], ,
+    [void some_function(void *foo, __attribute__((unused)) void *bar);],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_SENTINEL], [
+  CC_CHECK_ATTRIBUTE(
+    [sentinel], ,
+    [void some_function(void *foo, ...) __attribute__((sentinel));],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_DEPRECATED], [
+  CC_CHECK_ATTRIBUTE(
+    [deprecated], ,
+    [void some_function(void *foo, ...) __attribute__((deprecated));],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_ALIAS], [
+  CC_CHECK_ATTRIBUTE(
+    [alias], [weak, alias],
+    [void other_function(void *foo) { }
+     void some_function(void *foo) __attribute__((weak, alias("other_function")));],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_MALLOC], [
+  CC_CHECK_ATTRIBUTE(
+    [malloc], ,
+    [void * __attribute__((malloc)) my_alloc(int n);],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_PACKED], [
+  CC_CHECK_ATTRIBUTE(
+    [packed], ,
+    [struct astructure { char a; int b; long c; void *d; } __attribute__((packed));],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_CONST], [
+  CC_CHECK_ATTRIBUTE(
+    [const], ,
+    [int __attribute__((const)) twopow(int n) { return 1 << n; } ],
+    [$1], [$2])
+])
+
+AC_DEFUN([CC_FLAG_VISIBILITY], [
+  AC_REQUIRE([CC_CHECK_WERROR])
+  AC_CACHE_CHECK([if $CC supports -fvisibility=hidden],
+    [cc_cv_flag_visibility],
+    [cc_flag_visibility_save_CFLAGS="$CFLAGS"
+     CFLAGS="$CFLAGS $cc_cv_werror"
+     CC_CHECK_CFLAGS_SILENT([-fvisibility=hidden],
+     cc_cv_flag_visibility='yes',
+     cc_cv_flag_visibility='no')
+     CFLAGS="$cc_flag_visibility_save_CFLAGS"])
+
+  AS_IF([test "x$cc_cv_flag_visibility" = "xyes"],
+    [AC_DEFINE([SUPPORT_FLAG_VISIBILITY], 1,
+       [Define this if the compiler supports the -fvisibility flag])
+     $1],
+    [$2])
+])
+
+AC_DEFUN([CC_FUNC_EXPECT], [
+  AC_REQUIRE([CC_CHECK_WERROR])
+  AC_CACHE_CHECK([if compiler has __builtin_expect function],
+    [cc_cv_func_expect],
+    [ac_save_CFLAGS="$CFLAGS"
+     CFLAGS="$CFLAGS $cc_cv_werror"
+     AC_COMPILE_IFELSE([AC_LANG_SOURCE(
+       [int some_function() {
+        int a = 3;
+        return (int)__builtin_expect(a, 3);
+     }])],
+       [cc_cv_func_expect=yes],
+       [cc_cv_func_expect=no])
+     CFLAGS="$ac_save_CFLAGS"
+    ])
+
+  AS_IF([test "x$cc_cv_func_expect" = "xyes"],
+    [AC_DEFINE([SUPPORT__BUILTIN_EXPECT], 1,
+     [Define this if the compiler supports __builtin_expect() function])
+     $1],
+    [$2])
+])
+
+AC_DEFUN([CC_ATTRIBUTE_ALIGNED], [
+  AC_REQUIRE([CC_CHECK_WERROR])
+  AC_CACHE_CHECK([highest __attribute__ ((aligned ())) supported],
+    [cc_cv_attribute_aligned],
+    [ac_save_CFLAGS="$CFLAGS"
+     CFLAGS="$CFLAGS $cc_cv_werror"
+     for cc_attribute_align_try in 64 32 16 8 4 2; do
+        AC_COMPILE_IFELSE([AC_LANG_SOURCE([
+          int main() {
+            static char c __attribute__ ((aligned($cc_attribute_align_try))) = 0;
+            return c;
+          }])], [cc_cv_attribute_aligned=$cc_attribute_align_try; break])
+     done
+     CFLAGS="$ac_save_CFLAGS"
+  ])
+
+  if test "x$cc_cv_attribute_aligned" != "x"; then
+     AC_DEFINE_UNQUOTED([ATTRIBUTE_ALIGNED_MAX], [$cc_cv_attribute_aligned],
+       [Define the highest alignment supported])
+  fi
+])
diff --git a/m4/ax_prog_lua_modules.m4 b/m4/ax_prog_lua_modules.m4
new file mode 100644 (file)
index 0000000..8af66fe
--- /dev/null
@@ -0,0 +1,67 @@
+#
+# SYNOPSIS
+#
+#   AX_PROG_LUA_MODULES([MODULES], [ACTION-IF-TRUE], [ACTION-IF-FALSE])
+#
+# DESCRIPTION
+#
+#   Checks to see if the given Lua modules are available. If true the shell
+#   commands in ACTION-IF-TRUE are executed. If not the shell commands in
+#   ACTION-IF-FALSE are run. Note if $LUA is not set (for example by
+#   calling AC_CHECK_PROG, or AC_PATH_PROG), AC_CHECK_PROG(LUA, lua, lua)
+#   will be run.
+#
+#   MODULES is a space separated list of module names. To check for a
+#   minimum version of a module, append the version number to the module
+#   name, separated by an equals sign.
+#
+#   Example:
+#
+#     AX_PROG_LUA_MODULES(module=1.0.3,, AC_MSG_WARN(Need some Lua modules))
+#
+# LICENSE
+#
+#   Copyright (c) 2024 Michael Tremer <michael.tremer@lightningwirelabs.com>
+#
+#   Copying and distribution of this file, with or without modification, are
+#   permitted in any medium without royalty provided the copyright notice
+#   and this notice are preserved. This file is offered as-is, without any
+#   warranty.
+
+AU_ALIAS([AC_PROG_LUA_MODULES], [AX_PROG_LUA_MODULES])
+AC_DEFUN([AX_PROG_LUA_MODULES], [dnl
+       m4_define([ax_lua_modules])
+       m4_foreach([ax_lua_module], m4_split(m4_normalize([$1])), [
+               m4_append([ax_lua_modules], [']m4_bpatsubst(ax_lua_module,=,[ ])[' ])
+       ])
+
+       # Make sure we have Lua
+       if test -z "$LUA"; then
+               AC_CHECK_PROG(LUA, lua, lua)
+       fi
+
+       if test "x$LUA" != x; then
+               ax_lua_modules_failed=0
+               for ax_lua_module in ax_lua_modules; do
+                       AC_MSG_CHECKING(for Lua module $ax_lua_module)
+
+                       # Would be nice to log result here, but can't rely on autoconf internals
+                       $LUA -e "require('$ax_lua_module')" > /dev/null 2>&1
+                       if test $? -ne 0; then
+                               AC_MSG_RESULT(no);
+                               ax_lua_modules_failed=1
+                       else
+                               AC_MSG_RESULT(ok);
+                       fi
+               done
+
+               # Run optional shell commands
+               if test "$ax_lua_modules_failed" = 0; then
+                       :; $2
+               else
+                       :; $3
+               fi
+       else
+               AC_MSG_WARN(could not find Lua)
+       fi
+])dnl
diff --git a/man/libloc.txt b/man/libloc.txt
new file mode 100644 (file)
index 0000000..ec14e16
--- /dev/null
@@ -0,0 +1,51 @@
+= libloc(3)
+
+== Name
+
+libloc - A library to query the IPFire Location database
+
+== Synopsis
+[verse]
+
+#include <libloc/libloc.h>
+
+`pkg-config --cflags --libs libloc`
+
+== Description
+
+`libloc` is a lightweight library which can be used to query the IPFire
+Location database.
+
+See
+
+       * link:loc_new[3]
+       * link:loc_get_log_priority[3]
+       * link:loc_set_log_priority[3]
+       * link:loc_get_log_fn[3]
+       * link:loc_database_count_as[3]
+       * link:loc_database_get_as[3]
+       * link:loc_database_get_country[3]
+       * link:loc_database_lookup[3]
+       * link:loc_database_new[3]
+
+for more information about the functions available.
+
+== Copying
+
+Copyright (C) 2022 {author}. +
+This library is free software; you can redistribute it and/or modify it under the terms
+of the GNU Lesser General Public License as published by the Free Software Foundation;
+either version 2.1 of the License, or (at your option) any later version.
+
+== See Also
+
+link:location[1]
+
+== Bug Reports
+
+Please report all bugs to the bugtracker at https://bugzilla.ipfire.org/;
+refer to https://wiki.ipfire.org/devel/bugzilla for details.
+
+== Authors
+
+Michael Tremer
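
A minimal sketch of a program built against libloc using the pkg-config line from the
synopsis above; only loc_new() and loc_unref() from these man pages are used, and the
file name demo.c is just an example.

    /* demo.c -- build with:  cc demo.c $(pkg-config --cflags --libs libloc) */
    #include <stdio.h>
    #include <libloc/libloc.h>

    int main(void) {
        struct loc_ctx *ctx = NULL;

        /* Every libloc program starts by setting up a context, see loc_new(3). */
        if (loc_new(&ctx)) {
            fprintf(stderr, "Could not create a libloc context\n");
            return 1;
        }

        /* ... open and query the database here ... */

        /* Drop the reference again once done. */
        loc_unref(ctx);

        return 0;
    }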
diff --git a/man/loc_database_count_as.txt b/man/loc_database_count_as.txt
new file mode 100644 (file)
index 0000000..4cbe151
--- /dev/null
@@ -0,0 +1,24 @@
+= loc_database_count_as(3)
+
+== Name
+
+loc_database_count_as - Return the number of ASes in the database
+
+== Synopsis
+[verse]
+
+#include <libloc/database.h>
+
+size_t loc_database_count_as(struct loc_database{empty}* db);
+
+== Description
+
+Returns the total number of ASes in the database.
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
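
A minimal usage sketch, assuming a database that has already been opened with
loc_database_new(3):

    #include <stdio.h>
    #include <libloc/database.h>

    /* Print how many ASes an already opened database contains. */
    static void print_as_count(struct loc_database *db) {
        size_t count = loc_database_count_as(db);
        printf("The database contains %zu ASes\n", count);
    }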
diff --git a/man/loc_database_get_as.txt b/man/loc_database_get_as.txt
new file mode 100644 (file)
index 0000000..e4b962f
--- /dev/null
@@ -0,0 +1,31 @@
+= loc_database_get_as(3)
+
+== Name
+
+loc_database_get_as - Fetch an AS from the database
+
+== Synopsis
+[verse]
+
+#include <libloc/database.h>
+
+int loc_database_get_as(struct loc_database{empty}* db, struct loc_as{empty}*{empty}* as,
+       uint32_t number);
+
+== Description
+
+This function retrieves an Autonomous System with the matching _number_ from the database
+and stores it in _as_.
+
+== Return Value
+
+On success, zero is returned. Otherwise, non-zero is returned and _errno_ is set
+accordingly.
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
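
A usage sketch under a few assumptions: the database has been opened with
loc_database_new(3), loc_as_get_number() and loc_as_get_name() are taken from the
symbols file above, and loc_as_unref() is assumed to exist as the counterpart of
loc_as_ref().

    #include <stdint.h>
    #include <stdio.h>
    #include <libloc/as.h>
    #include <libloc/database.h>

    /* Fetch a single AS by number and print its name. */
    static void print_as(struct loc_database *db, uint32_t number) {
        struct loc_as *as = NULL;

        if (loc_database_get_as(db, &as, number)) {
            fprintf(stderr, "AS%u was not found\n", number);
            return;
        }

        printf("AS%u - %s\n", loc_as_get_number(as), loc_as_get_name(as));
        loc_as_unref(as);
    }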
diff --git a/man/loc_database_get_country.txt b/man/loc_database_get_country.txt
new file mode 100644 (file)
index 0000000..b5ab8ec
--- /dev/null
@@ -0,0 +1,29 @@
+= loc_database_get_country(3)
+
+== Name
+
+loc_database_get_country - Fetch country information from the database
+
+== Synopsis
+
+#include <libloc/database.h>
+
+int loc_database_get_country(struct loc_database{empty}* db,
+       struct loc_country{empty}*{empty}* country, const char{empty}* code);
+
+== Description
+
+This function fetches information about the country with the matching _code_.
+
+== Return Value
+
+On success, zero is returned. Otherwise, non-zero is returned and _errno_ is set
+accordingly.
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
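
Another sketch: loc_country_unref() appears in the symbols file above, while the
loc_country_get_name() accessor used here is an assumption and not part of this
man page.

    #include <stdio.h>
    #include <libloc/country.h>
    #include <libloc/database.h>

    /* Print the name of the country with the given ISO code, e.g. "DE". */
    static void print_country(struct loc_database *db, const char *code) {
        struct loc_country *country = NULL;

        if (loc_database_get_country(db, &country, code)) {
            fprintf(stderr, "No country found for %s\n", code);
            return;
        }

        printf("%s - %s\n", code, loc_country_get_name(country));
        loc_country_unref(country);
    }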
diff --git a/man/loc_database_lookup.txt b/man/loc_database_lookup.txt
new file mode 100644 (file)
index 0000000..b6f780a
--- /dev/null
@@ -0,0 +1,37 @@
+= loc_database_lookup(3)
+
+== Name
+
+loc_database_lookup - Lookup a network from the database
+
+== Synopsis
+
+#include <libloc/database.h>
+
+int loc_database_lookup(struct loc_database{empty}* db,
+       const struct in6_addr{empty}* address, struct loc_network{empty}*{empty}* network);
+
+int loc_database_lookup_from_string(struct loc_database{empty}* db,
+       const char{empty}* string, struct loc_network{empty}*{empty}* network);
+
+== Description
+
+The lookup functions try to find a network in the database.
+
+_loc_database_lookup_ takes the IP address in _struct in6_addr_ format, which can either
+be a regular IPv6 address or a mapped IPv4 address.
+
+_loc_database_lookup_from_string_ takes the IP address as a string and parses it automatically.
+
+== Return Value
+
+On success, zero is returned. Otherwise, non-zero is returned and _errno_ is set
+accordingly.
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
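
A sketch of the string variant; the loc_network_get_country_code(),
loc_network_get_asn() and loc_network_unref() accessors used for output are
assumptions and not part of this man page.

    #include <stdio.h>
    #include <libloc/database.h>
    #include <libloc/network.h>

    /* Look up an address given as a string and print what the database knows. */
    static void lookup(struct loc_database *db, const char *address) {
        struct loc_network *network = NULL;

        if (loc_database_lookup_from_string(db, address, &network) || !network) {
            fprintf(stderr, "Nothing found for %s\n", address);
            return;
        }

        printf("%s: country=%s asn=%u\n", address,
            loc_network_get_country_code(network),
            loc_network_get_asn(network));

        loc_network_unref(network);
    }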
diff --git a/man/loc_database_new.txt b/man/loc_database_new.txt
new file mode 100644 (file)
index 0000000..86a021b
--- /dev/null
@@ -0,0 +1,53 @@
+= loc_database_new(3)
+
+== Name
+
+loc_database_new - Create a new libloc context
+
+== Synopsis
+[verse]
+
+#include <libloc/libloc.h>
+#include <libloc/database.h>
+
+struct loc_database;
+
+int loc_database_new(struct loc_ctx{empty}* ctx,
+       struct loc_database{empty}*{empty}* database, FILE{empty}* f);
+
+Reference Counting:
+
+struct loc_database{empty}* loc_database_ref(struct loc_database{empty}* db);
+
+struct loc_database{empty}* loc_database_unref(struct loc_database{empty}* db);
+
+Access some data:
+
+time_t loc_database_created_at(struct loc_database{empty}* db);
+
+const char{empty}* loc_database_get_vendor(struct loc_database{empty}* db);
+
+const char{empty}* loc_database_get_description(struct loc_database{empty}* db);
+
+const char{empty}* loc_database_get_license(struct loc_database{empty}* db);
+
+== Description
+
+loc_database_new() opens a new database from the given file descriptor.
+The file descriptor can be closed after this operation because the function creates
+its own copy.
+
+If the database could be opened successfully, zero is returned. Otherwise a non-zero
+return code will indicate an error and errno will be set appropriately.
+
+Various meta-data about the database can be retrieved with
+loc_database_created_at(), loc_database_get_vendor(), loc_database_get_description(),
+and loc_database_get_license().
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
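
A sketch that ties this together with the packaging above: the path matches the
var/lib/location/database.db file shipped by debian/location.install, and ctx is a
context created with loc_new(3).

    #include <stdio.h>
    #include <libloc/libloc.h>
    #include <libloc/database.h>

    /* Open the packaged database file and print its vendor string. */
    static struct loc_database *open_default_database(struct loc_ctx *ctx) {
        struct loc_database *db = NULL;

        FILE *f = fopen("/var/lib/location/database.db", "r");
        if (!f) {
            perror("fopen");
            return NULL;
        }

        int r = loc_database_new(ctx, &db, f);

        /* loc_database_new() keeps its own copy, so the file can be closed here. */
        fclose(f);

        if (r)
            return NULL;

        printf("Vendor: %s\n", loc_database_get_vendor(db));

        return db;
    }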
diff --git a/man/loc_get_log_priority.txt b/man/loc_get_log_priority.txt
new file mode 100644 (file)
index 0000000..447a8c8
--- /dev/null
@@ -0,0 +1,28 @@
+= loc_get_log_priority(3)
+
+== Name
+
+loc_get_log_priority - Fetches the log level of a libloc context
+
+== Synopsis
+[verse]
+
+#include <libloc/libloc.h>
+
+int loc_get_log_priority(struct loc_ctx{empty}* ctx);
+
+== Description
+
+Returns the log priority of the given context.
+
+The returned integer is a valid syslog log level as defined in syslog(3).
+
+The default value is LOG_ERR.
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
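
A small sketch combining this getter with loc_set_log_priority(3); LOG_DEBUG comes
from syslog(3) as mentioned above.

    #include <syslog.h>
    #include <libloc/libloc.h>

    /* Raise the log level to LOG_DEBUG unless the context is already that verbose. */
    static void enable_debug(struct loc_ctx *ctx) {
        if (loc_get_log_priority(ctx) < LOG_DEBUG)
            loc_set_log_priority(ctx, LOG_DEBUG);
    }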
diff --git a/man/loc_new.txt b/man/loc_new.txt
new file mode 100644 (file)
index 0000000..c1000be
--- /dev/null
@@ -0,0 +1,35 @@
+= loc_new(3)
+
+== Name
+
+loc_new - Create a new libloc context
+
+== Synopsis
+[verse]
+
+#include <libloc/libloc.h>
+
+struct loc_ctx;
+
+int loc_new(struct loc_ctx{empty}*{empty}* ctx);
+
+struct loc_ctx{empty}* loc_ref(struct loc_ctx{empty}* ctx);
+
+struct loc_ctx{empty}* loc_unref(struct loc_ctx{empty}* ctx);
+
+== Description
+
+Every operation in libloc requires a context to be set up first.
+This is done by calling loc_new(3).
+
+Whenever another part of your code holds a reference to the context,
+call loc_ref() to increase the reference counter.
+Once you no longer need the context, call loc_unref().
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
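
A sketch of the reference-counting pattern described above; the second pointer only
stands in for another part of the program holding its own reference.

    #include <libloc/libloc.h>

    int main(void) {
        struct loc_ctx *ctx = NULL;

        if (loc_new(&ctx))
            return 1;

        /* Another component keeps its own reference... */
        struct loc_ctx *other = loc_ref(ctx);

        /* ...and every holder drops its reference once it is done. */
        loc_unref(other);
        loc_unref(ctx);

        return 0;
    }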
diff --git a/man/loc_set_log_fn.txt b/man/loc_set_log_fn.txt
new file mode 100644 (file)
index 0000000..00c1854
--- /dev/null
@@ -0,0 +1,29 @@
+= loc_set_log_fn(3)
+
+== Name
+
+loc_set_log_fn - Sets the log callback function
+
+== Synopsis
+[verse]
+
+#include <libloc/libloc.h>
+
+void loc_set_log_fn(struct loc_ctx{empty}* ctx,
+       void ({empty}*log_fn)(struct loc_ctx{empty}* ctx, int priority,
+       const char{empty}* file, int line, const char{empty}* fn, const char{empty}* format,
+       va_list args)
+
+== Description
+
+By setting this callback, the calling application can route libloc's log messages
+into its own logging system.
+
+It will be called once for each log message according to the configured log level.
+
+== See Also
+
+link:libloc[3]
+
+== Authors
+
+Michael Tremer
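
A sketch of a callback matching the signature above, forwarding libloc messages to
stderr; whether libloc terminates the format string with a newline is not specified
here, so none is added.

    #include <stdarg.h>
    #include <stdio.h>
    #include <libloc/libloc.h>

    /* Forward libloc log messages to stderr. */
    static void log_to_stderr(struct loc_ctx *ctx, int priority,
            const char *file, int line, const char *fn,
            const char *format, va_list args) {
        (void)ctx;

        fprintf(stderr, "libloc(%d) %s:%d %s(): ", priority, file, line, fn);
        vfprintf(stderr, format, args);
    }

    /* After loc_new(3):  loc_set_log_fn(ctx, log_to_stderr); */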
diff --git a/man/loc_set_log_priority.txt b/man/loc_set_log_priority.txt
new file mode 100644 (file)
index 0000000..76556bb
--- /dev/null
@@ -0,0 +1,25 @@
+= loc_set_log_priority(3)
+
+== Name
+
+loc_set_log_priority - Sets the log level of a libloc context
+
+== Synopsis
+[verse]
+
+#include <libloc/libloc.h>
+
+void loc_set_log_priority(struct loc_ctx{empty}* ctx, int priority)
+
+== Description
+
+Sets the log priority of the given context. See loc_get_log_priority(3) for more details.
+
+== See Also
+
+link:libloc[3]
+link:loc_set_log_fn(3)
+
+== Authors
+
+Michael Tremer
index b38f21cf1ab8d7e5f8ff7bd445fd155d7b2a82f5..70352d2be23648e0ce772e7aa4519ee732df031b 100644 (file)
@@ -1,4 +1,4 @@
-= location(8)
+= location(1)
 
 == NAME
 location - Query the location database
@@ -15,6 +15,7 @@ location - Query the location database
 `location search-as STRING`
 `location update [--cron=daily|weekly|monthly]`
 `location verify`
+`location version`
 
 == DESCRIPTION
 `location` retrieves information from the location database.
@@ -38,7 +39,7 @@ or countries.
 
 == COMMANDS
 
-'export --directory=DIR [--format=FORMAT] [--family=ipv6|ipv4] [ASN|CC ...]'::
+'export [--directory=DIR] [--format=FORMAT] [--family=ipv6|ipv4] [ASN|CC ...]'::
        This command exports the whole database into the given directory.
        +
        The output can be filtered by only exporting a certain address family, or by passing
@@ -46,6 +47,9 @@ or countries.
        +
        The output format can be chosen with the '--format' parameter. For possible formats,
        please see below.
+       +
+       If the '--directory' option is omitted, the output will be written to stdout, which
+       is useful when you want to load custom exports straight into nftables or ipset.
 
 'get-as ASN [ASN...]'::
        This command returns the name of the owning organisation of the Autonomous
@@ -78,6 +82,11 @@ or countries.
        +
        See above for usage of the '--family' and '--format' parameters.
 
+'list-bogons [--family=[ipv6|ipv4]] [--format=FORMAT]'::
+       Lists all bogons (i.e. networks that are unknown to the database).
+       +
+       See above for usage of the '--family' and '--format' parameters.
+
 'lookup ADDRESS [ADDRESS...]'::
        This command returns the network the given IP address has been found in
        as well as its Autonomous System if that information is available.
@@ -101,6 +110,9 @@ or countries.
 'verify'::
        Verifies the downloaded database.
 
+'version'::
+       Shows the version information of the downloaded database.
+
 '--help'::
        Shows a short help text on using this program.
 
@@ -129,8 +141,9 @@ It will then try to download a file with that version from a mirror server.
 If the downloaded file is outdated, the next mirror will be tried until we
 have found a file that is recent enough.
 
-== BUGS
-Please report all bugs to the bugtracker at https://bugzilla.ipfire.org/.
+== BUG REPORTS
+Please report all bugs to the bugtracker at https://bugzilla.ipfire.org/;
+refer to https://wiki.ipfire.org/devel/bugzilla for details.
 
 == AUTHORS
 Michael Tremer
index 7673daa944ec436c99a79a259b7c661a54b081e6..711b12d3c826f3088ba36da903fe1fc2ab5b514f 100644 (file)
@@ -1 +1,2 @@
 de
+ka
index 5d2cc46cb74d6df89279de9d4852223d70c136e6..d954f9c0376c439f25b69ec4ea4d518625cbb63b 100644 (file)
@@ -1,12 +1,12 @@
+src/cron/location-update.in
 src/libloc.pc.in
-src/python/__init__.py.in
-src/python/database.py
-src/python/downloader.py
-src/python/export.py
-src/python/i18n.py
-src/python/importer.py
-src/python/location-importer.in
-src/python/location.in
-src/python/logger.py
+src/python/location/__init__.py
+src/python/location/database.py
+src/python/location/downloader.py
+src/python/location/export.py
+src/python/location/i18n.py
+src/python/location/logger.py
+src/scripts/location-importer.in
+src/scripts/location.in
 src/systemd/location-update.service.in
 src/systemd/location-update.timer.in
index 3cbcdd708637411b2ac76ea009dc39ddc9917ce5..f5e8944e51d5dd57f1afa5a7caba6fc5361bf108 100644 (file)
--- a/po/de.po
+++ b/po/de.po
@@ -7,7 +7,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: libloc 0\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-04-15 11:29+0000\n"
+"POT-Creation-Date: 2024-03-04 12:21+0000\n"
 "PO-Revision-Date: 2018-02-01 14:05+0000\n"
 "Last-Translator: Michael Tremer <michael.tremer@ipfire.org>\n"
 "Language-Team: German\n"
@@ -17,6 +17,9 @@ msgstr ""
 "Content-Transfer-Encoding: 8bit\n"
 "Plural-Forms: nplurals=2; plural=(n != 1);\n"
 
+msgid "Won't write binary output to stdout"
+msgstr ""
+
 msgid "Location Importer Command Line Interface"
 msgstr ""
 
@@ -79,6 +82,9 @@ msgstr ""
 msgid "Update WHOIS Information"
 msgstr ""
 
+msgid "Only update these sources"
+msgstr ""
+
 msgid "Update BGP Annoucements"
 msgstr ""
 
@@ -88,6 +94,15 @@ msgstr ""
 msgid "SERVER"
 msgstr ""
 
+msgid "Update Geofeeds"
+msgstr ""
+
+msgid "Update Feeds"
+msgstr ""
+
+msgid "Only update these feeds"
+msgstr ""
+
 msgid "Update overrides"
 msgstr ""
 
@@ -155,6 +170,9 @@ msgstr ""
 msgid "Hostile Networks safe to drop"
 msgstr ""
 
+msgid "Lists all bogons"
+msgstr ""
+
 msgid "Lists all countries"
 msgstr ""
 
@@ -208,6 +226,9 @@ msgstr ""
 msgid "Anycast"
 msgstr ""
 
+msgid "Hostile Network safe to drop"
+msgstr ""
+
 #, python-format
 msgid "Invalid ASN: %s"
 msgstr ""
diff --git a/po/ka.po b/po/ka.po
new file mode 100644 (file)
index 0000000..25e60b8
--- /dev/null
+++ b/po/ka.po
@@ -0,0 +1,282 @@
+# Georgian translation for libloc.
+# Copyright (C) 2023 libloc's authors.
+# This file is distributed under the same license as the libloc package.
+# Temuri Doghonadze <temuri.doghonadze@gmail.com>, 2023.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: libloc\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2024-03-04 12:21+0000\n"
+"PO-Revision-Date: 2023-02-22 08:57+0100\n"
+"Last-Translator: Temuri Doghonadze <temuri.doghonadze@gmail.com>\n"
+"Language-Team: Georgian <(nothing)>\n"
+"Language: ka\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"X-Generator: Poedit 3.2.2\n"
+
+msgid "Won't write binary output to stdout"
+msgstr "ბინარული მონაცემები stdout-ზე გამოტანილი არ იქნება"
+
+msgid "Location Importer Command Line Interface"
+msgstr "მდებარეობის შემოტანის ბრძანების სტრიქონის ინტერფეისი"
+
+msgid "Enable debug output"
+msgstr "გამართვის გამოტანის ჩართვა"
+
+msgid "Enable quiet mode"
+msgstr "ჩუმი რეჟიმის ჩართვა"
+
+msgid "Database Hostname"
+msgstr "ბაზის ჰოსტის სახელი"
+
+msgid "HOST"
+msgstr "ჰოსტი"
+
+msgid "Database Name"
+msgstr "ბაზის სახელი"
+
+msgid "NAME"
+msgstr "სახელი"
+
+msgid "Database Username"
+msgstr "ბაზის მომხმარებლის სახელი"
+
+msgid "USERNAME"
+msgstr "მომხმარებლის სახელი"
+
+msgid "Database Password"
+msgstr "მონაცემთა ბაზის პაროლი"
+
+msgid "PASSWORD"
+msgstr "პაროლი"
+
+#. Write Database
+msgid "Write database to file"
+msgstr "მონაცემთა ბაზის ფაილში ჩაწრა"
+
+msgid "Database File"
+msgstr "ბაზის ფაილი"
+
+msgid "Signing Key"
+msgstr "ხელმოწერის გასაღები"
+
+msgid "Backup Signing Key"
+msgstr "სარეზერვო ხელმოწერის გასაღები"
+
+msgid "Sets the vendor"
+msgstr "მომწოდებლის დაყენება"
+
+msgid "Sets a description"
+msgstr "აღწერის დაყენება"
+
+msgid "Sets the license"
+msgstr "ლიცენზიის დაყენება"
+
+msgid "Database Format Version"
+msgstr "ბაზის ფორმატის ვერსია"
+
+#. Update WHOIS
+msgid "Update WHOIS Information"
+msgstr "WHOIS-ის ინფორმაციის განახლება"
+
+msgid "Only update these sources"
+msgstr ""
+
+msgid "Update BGP Annoucements"
+msgstr "BGP-ის ანონსების განახლება"
+
+msgid "Route Server to connect to"
+msgstr "რომელ რაუტის სერვერს დავუკავშირდე"
+
+msgid "SERVER"
+msgstr "სერვერი"
+
+#, fuzzy
+msgid "Update Geofeeds"
+msgstr "განახლება გადაფარავს"
+
+#, fuzzy
+msgid "Update Feeds"
+msgstr "განახლება გადაფარავს"
+
+msgid "Only update these feeds"
+msgstr ""
+
+msgid "Update overrides"
+msgstr "განახლება გადაფარავს"
+
+msgid "Files to import"
+msgstr "შემოსატანი ფაილები"
+
+msgid "Import countries"
+msgstr "ქვეყნების შემოტანა"
+
+msgid "File to import"
+msgstr "შემოსატანი ფაილი"
+
+msgid "Location Database Command Line Interface"
+msgstr "მდებარეობის ბაზის ბრძანების სტრიქონის ინტერფეისი"
+
+msgid "Path to database"
+msgstr "ბილიკი ბაზამდე"
+
+msgid "Public Signing Key"
+msgstr "საჯარო ხელმოწერის გასაღები"
+
+msgid "Show database version"
+msgstr "ბაზის ვერსიის ჩვენება"
+
+msgid "Lookup one or multiple IP addresses"
+msgstr "ერთი ან რამდენიმე IP მისამართის მოზებნა"
+
+msgid "Dump the entire database"
+msgstr "მთელი ბაზის დამპი"
+
+#. Update
+msgid "Update database"
+msgstr "ბაზის განახლება"
+
+msgid "Update the library only once per interval"
+msgstr "ბიბლიოთეკის მხოლოდ მითითებულ ინტერვალში განახლება"
+
+msgid "Verify the downloaded database"
+msgstr "გადმოწერილი ბაზის შემოწმება"
+
+msgid "Get information about one or multiple Autonomous Systems"
+msgstr "ერთ ან მეტ ავტონომიურ სისტემაზე ინფორმაციის მიღება"
+
+msgid "Search for Autonomous Systems that match the string"
+msgstr "ავტონომიური სისტემების ძებნა, რომლებიც სტრიქონს ემთხვევა"
+
+msgid "Lists all networks in an AS"
+msgstr "AS-ში ყველა ქსელის სია"
+
+msgid "Lists all networks in a country"
+msgstr "ქვეყნის ყველა ქსელის სია"
+
+msgid "Lists all networks with flags"
+msgstr "ქსელების ალმებით ჩვენება"
+
+msgid "Anonymous Proxies"
+msgstr "ანონიმური პროქსები"
+
+msgid "Satellite Providers"
+msgstr "სატელიტური პროვაიდერები"
+
+msgid "Anycasts"
+msgstr "Anycasts"
+
+msgid "Hostile Networks safe to drop"
+msgstr "უსაფრთხოდ დაბლოკვადი მტრული ქსელები"
+
+msgid "Lists all bogons"
+msgstr "ყველა ჭაობის სია"
+
+msgid "Lists all countries"
+msgstr "ყველა ქვეყნის სია"
+
+msgid "Show the name of the country"
+msgstr "ქვეყნის სახელის ჩვენება"
+
+msgid "Show the continent"
+msgstr "კონტინენტის ჩვენება"
+
+msgid "Exports data in many formats to load it into packet filters"
+msgstr "მონაცემების ბევრ ფორმატში გატანა მათი პაკეტის ფილტრებში ჩასატვირთად"
+
+msgid "Output format"
+msgstr "გამოტანის ფორმატი"
+
+msgid "Output directory"
+msgstr "გამოტანის საქაღალდე"
+
+msgid "Specify address family"
+msgstr "მიუთითეთ მისამართის ოჯახი"
+
+msgid "List country codes or ASNs to export"
+msgstr "ქვეყნის კოდების ან ASN-ების სია გასატანად"
+
+#, python-format
+msgid "Invalid IP address: %s"
+msgstr "არასწორი IP მისამართი: %s"
+
+#, python-format
+msgid "Nothing found for %(address)s"
+msgstr "%(address)s-სთვის ვერაფერი ვიპოვე"
+
+msgid "Network"
+msgstr "ქსელი"
+
+msgid "Country"
+msgstr "ქვეყანა"
+
+msgid "Autonomous System"
+msgstr "ავტონომიური სისტემა"
+
+msgid "Anonymous Proxy"
+msgstr "ანონიმური პროქსი"
+
+msgid "yes"
+msgstr "დიახ"
+
+msgid "Satellite Provider"
+msgstr "სატელიტური პროვაიდერი"
+
+msgid "Anycast"
+msgstr "Anycast"
+
+msgid "Hostile Network safe to drop"
+msgstr "უსაფრთხოდ დაბლოკვადი მტრული ქსელი"
+
+#, python-format
+msgid "Invalid ASN: %s"
+msgstr "არასწორი ASN: %s"
+
+#, python-format
+msgid "Could not find AS%s"
+msgstr "ვერ ვიპოვნე AS%s"
+
+#, python-format
+msgid "AS%(asn)s belongs to %(name)s"
+msgstr "AS%(asn)s ეკუთვნის %(name)s"
+
+msgid "The database has been updated recently"
+msgstr "ბაზა ახლახანს განახლდა"
+
+msgid "You must at least pass one flag"
+msgstr "აუცილებელია, ერთი ალამი მაინც გადასცეთ"
+
+#, python-format
+msgid "One Day"
+msgid_plural "%(days)s Days"
+msgstr[0] "1 დღე"
+msgstr[1] "%(days)s დღე"
+
+#, python-format
+msgid "One Hour"
+msgid_plural "%(hours)s Hours"
+msgstr[0] "1 საათი"
+msgstr[1] "%(hours)s საათი"
+
+#, python-format
+msgid "One Minute"
+msgid_plural "%(minutes)s Minutes"
+msgstr[0] "1 წუთი"
+msgstr[1] "%(minutes)s წუთი"
+
+#, python-format
+msgid "One Second"
+msgid_plural "%(seconds)s Seconds"
+msgstr[0] "1 წამი"
+msgstr[1] "%(seconds)s წამი"
+
+msgid "Now"
+msgstr "ახლა"
+
+#, python-format
+msgid "%s ago"
+msgstr "%s-ის წინ"
index 3ccbdb802c639ff803c4078a5253fe024eda1a11..a0ca3cf0c61d2f366a69ba246c33b17b45b1543e 100644 (file)
@@ -5,6 +5,7 @@
 *.lo
 *.trs
 libloc.pc
+test-address
 test-as
 test-libloc
 test-database
diff --git a/src/address.c b/src/address.c
new file mode 100644 (file)
index 0000000..d338243
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2022 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <libloc/address.h>
+#include <libloc/compat.h>
+
+#define LOC_ADDRESS_BUFFERS                            6
+#define LOC_ADDRESS_BUFFER_LENGTH              INET6_ADDRSTRLEN
+
+static char __loc_address_buffers[LOC_ADDRESS_BUFFERS][LOC_ADDRESS_BUFFER_LENGTH + 1];
+static int  __loc_address_buffer_idx = 0;
+
+static const char* __loc_address6_str(const struct in6_addr* address, char* buffer, size_t length) {
+       return inet_ntop(AF_INET6, address, buffer, length);
+}
+
+static const char* __loc_address4_str(const struct in6_addr* address, char* buffer, size_t length) {
+       struct in_addr address4 = {
+               .s_addr = address->s6_addr32[3],
+       };
+
+       return inet_ntop(AF_INET, &address4, buffer, length);
+}
+
+const char* loc_address_str(const struct in6_addr* address) {
+       if (!address)
+               return NULL;
+
+       // Select buffer
+       char* buffer = __loc_address_buffers[__loc_address_buffer_idx++];
+
+       // Prevent index from overflow
+       __loc_address_buffer_idx %= LOC_ADDRESS_BUFFERS;
+
+       if (IN6_IS_ADDR_V4MAPPED(address))
+               return __loc_address4_str(address, buffer, LOC_ADDRESS_BUFFER_LENGTH);
+       else
+               return __loc_address6_str(address, buffer, LOC_ADDRESS_BUFFER_LENGTH);
+}
+
+static void loc_address_from_address4(struct in6_addr* address,
+               const struct in_addr* address4) {
+       address->s6_addr32[0] = 0;
+       address->s6_addr32[1] = 0;
+       address->s6_addr32[2] = htonl(0xffff);
+       address->s6_addr32[3] = address4->s_addr;
+}
+
+int loc_address_parse(struct in6_addr* address, unsigned int* prefix, const char* string) {
+       char buffer[INET6_ADDRSTRLEN + 4];
+       int r;
+
+       if (!address || !string) {
+               errno = EINVAL;
+               return 1;
+       }
+
+       // Copy the string into the buffer
+       r = snprintf(buffer, sizeof(buffer) - 1, "%s", string);
+       if (r < 0)
+               return 1;
+
+       // Find /
+       char* p = strchr(buffer, '/');
+       if (p) {
+               // Terminate the IP address
+               *p++ = '\0';
+       }
+
+       int family = AF_UNSPEC;
+
+       // Try parsing as an IPv6 address
+       r = inet_pton(AF_INET6, buffer, address);
+       switch (r) {
+               // This is not a valid IPv6 address
+               case 0:
+                       break;
+
+               // This is a valid IPv6 address
+               case 1:
+                       family = AF_INET6;
+                       break;
+
+               // Unexpected error
+               default:
+                       return 1;
+       }
+
+       // Try parsing as an IPv4 address
+       if (!family) {
+               struct in_addr address4;
+
+               r = inet_pton(AF_INET, buffer, &address4);
+               switch (r) {
+                       // This was not a valid IPv4 address
+                       case 0:
+                               break;
+
+                       // This was a valid IPv4 address
+                       case 1:
+                               family = AF_INET;
+
+                               // Copy the result
+                               loc_address_from_address4(address, &address4);
+                               break;
+
+                       // Unexpected error
+                       default:
+                               return 1;
+               }
+       }
+
+       // Invalid input
+       if (family == AF_UNSPEC) {
+               errno = EINVAL;
+               return 1;
+       }
+
+       // Did the user request a prefix?
+       if (prefix) {
+               // Set the prefix to the default value
+               const unsigned int max_prefix = loc_address_family_bit_length(family);
+
+               // Parse the actual string
+               if (p) {
+                       *prefix = strtol(p, NULL, 10);
+
+                       // Check if prefix is within bounds
+                       if (*prefix > max_prefix) {
+                               errno = EINVAL;
+                               return 1;
+                       }
+
+               // If the string didn't contain a prefix, we set the maximum
+               } else {
+                       *prefix = max_prefix;
+               }
+       }
+
+       return 0;
+}
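
loc_address_parse() and loc_address_str() are internal helpers (they carry no LOC_EXPORT), so the following is only a minimal sketch of how they are meant to be combined inside the library. The header path <libloc/address.h> matches the new header added by this commit; the example address is arbitrary.

#include <netinet/in.h>
#include <stdio.h>

#include <libloc/address.h>

int main(void) {
	struct in6_addr address;
	unsigned int prefix = 0;

	// IPv4 input is stored as an IPv4-mapped IPv6 address; the prefix
	// defaults to the family's maximum when the string carries none.
	if (loc_address_parse(&address, &prefix, "192.0.2.0/24"))
		return 1;

	// loc_address_str() detects the mapped address and prints dotted-quad
	// notation from one of its rotating static buffers.
	printf("%s/%u\n", loc_address_str(&address), prefix);

	return 0;
}
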
index 5acbb8a9d8e056a130b0db1944d118783401de05..50805e771b2d7c09ada7bf94b93bf511b1c4ad7e 100644 (file)
        Lesser General Public License for more details.
 */
 
-#include <errno.h>
 #include <stdlib.h>
 
-#include <loc/as.h>
-#include <loc/as-list.h>
-#include <loc/private.h>
+#include <libloc/as.h>
+#include <libloc/as-list.h>
+#include <libloc/compat.h>
+#include <libloc/private.h>
 
 struct loc_as_list {
        struct loc_ctx* ctx;
@@ -31,14 +31,18 @@ struct loc_as_list {
        size_t size;
 };
 
-static int loc_as_list_grow(struct loc_as_list* list, size_t size) {
+static int loc_as_list_grow(struct loc_as_list* list) {
+       size_t size = list->elements_size * 2;
+       if (size < 1024)
+               size = 1024;
+
        DEBUG(list->ctx, "Growing AS list %p by %zu to %zu\n",
                list, size, list->elements_size + size);
 
        struct loc_as** elements = reallocarray(list->elements,
                        list->elements_size + size, sizeof(*list->elements));
        if (!elements)
-               return -errno;
+               return 1;
 
        list->elements = elements;
        list->elements_size += size;
@@ -50,7 +54,7 @@ LOC_EXPORT int loc_as_list_new(struct loc_ctx* ctx,
                struct loc_as_list** list) {
        struct loc_as_list* l = calloc(1, sizeof(*l));
        if (!l)
-               return -ENOMEM;
+               return 1;
 
        l->ctx = loc_ref(ctx);
        l->refcount = 1;
@@ -124,7 +128,7 @@ LOC_EXPORT int loc_as_list_append(
 
        // Check if we have space left
        if (list->size >= list->elements_size) {
-               int r = loc_as_list_grow(list, 64);
+               int r = loc_as_list_grow(list);
                if (r)
                        return r;
        }
@@ -159,3 +163,12 @@ LOC_EXPORT int loc_as_list_contains_number(
 
        return r;
 }
+
+static int __loc_as_cmp(const void* as1, const void* as2) {
+       return loc_as_cmp(*(struct loc_as**)as1, *(struct loc_as**)as2);
+}
+
+LOC_EXPORT void loc_as_list_sort(struct loc_as_list* list) {
+       // Sort everything
+       qsort(list->elements, list->size, sizeof(*list->elements), __loc_as_cmp);
+}
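
The list no longer grows by a fixed 64 elements: each grow adds twice the current capacity, with a floor of 1024 slots. A standalone sketch of that policy under illustrative names; plain realloc() stands in for the reallocarray() that libloc pulls in through <libloc/compat.h>.

#include <stdlib.h>

struct ptr_list {
	void** elements;
	size_t elements_size;   /* allocated slots */
	size_t size;            /* used slots */
};

int ptr_list_grow(struct ptr_list* list) {
	// Grow by twice the current capacity, but never by fewer than 1024 slots
	size_t grow_by = list->elements_size * 2;
	if (grow_by < 1024)
		grow_by = 1024;

	void** elements = realloc(list->elements,
		(list->elements_size + grow_by) * sizeof(*list->elements));
	if (!elements)
		return 1;

	list->elements = elements;
	list->elements_size += grow_by;

	return 0;
}

int ptr_list_append(struct ptr_list* list, void* element) {
	// Grow on demand so appends stay cheap on average
	if (list->size >= list->elements_size && ptr_list_grow(list))
		return 1;

	list->elements[list->size++] = element;
	return 0;
}
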
index 757bf3d3b742b19b29d426974beaf4813b953384..97c0a567a48c9760ac30c800b43df957cae9dbc7 100644 (file)
--- a/src/as.c
+++ b/src/as.c
 #  include <endian.h>
 #endif
 
-#include <loc/libloc.h>
-#include <loc/as.h>
-#include <loc/compat.h>
-#include <loc/format.h>
-#include <loc/private.h>
-#include <loc/stringpool.h>
+#include <libloc/libloc.h>
+#include <libloc/as.h>
+#include <libloc/compat.h>
+#include <libloc/format.h>
+#include <libloc/private.h>
+#include <libloc/stringpool.h>
 
 struct loc_as {
        struct loc_ctx* ctx;
@@ -42,7 +42,7 @@ struct loc_as {
 LOC_EXPORT int loc_as_new(struct loc_ctx* ctx, struct loc_as** as, uint32_t number) {
        struct loc_as* a = calloc(1, sizeof(*a));
        if (!a)
-               return -ENOMEM;
+               return 1;
 
        a->ctx = loc_ref(ctx);
        a->refcount = 1;
index 103536cdc52f2d7599734c1310622c867dc54258..536fd9e121c08ff6b98784de36b99e121647a8ba 100644 (file)
 #include <errno.h>
 #include <stdlib.h>
 
-#include <loc/country.h>
-#include <loc/country-list.h>
-#include <loc/private.h>
+#include <libloc/compat.h>
+#include <libloc/country.h>
+#include <libloc/country-list.h>
+#include <libloc/private.h>
 
 struct loc_country_list {
        struct loc_ctx* ctx;
@@ -31,14 +32,18 @@ struct loc_country_list {
        size_t size;
 };
 
-static int loc_country_list_grow(struct loc_country_list* list, size_t size) {
+static int loc_country_list_grow(struct loc_country_list* list) {
+       size_t size = list->elements_size * 2;
+       if (size < 1024)
+               size = 1024;
+
        DEBUG(list->ctx, "Growing country list %p by %zu to %zu\n",
                list, size, list->elements_size + size);
 
        struct loc_country** elements = reallocarray(list->elements,
                        list->elements_size + size, sizeof(*list->elements));
        if (!elements)
-               return -errno;
+               return 1;
 
        list->elements = elements;
        list->elements_size += size;
@@ -77,9 +82,6 @@ static void loc_country_list_free(struct loc_country_list* list) {
 }
 
 LOC_EXPORT struct loc_country_list* loc_country_list_unref(struct loc_country_list* list) {
-       if (!list)
-               return NULL;
-
        if (--list->refcount > 0)
                return list;
 
@@ -124,7 +126,7 @@ LOC_EXPORT int loc_country_list_append(
 
        // Check if we have space left
        if (list->size >= list->elements_size) {
-               int r = loc_country_list_grow(list, 64);
+               int r = loc_country_list_grow(list);
                if (r)
                        return r;
        }
@@ -155,8 +157,8 @@ LOC_EXPORT int loc_country_list_contains_code(
                // Ignore invalid country codes which would never match
                if (errno == EINVAL)
                        return 0;
-               else
-                       return r;
+
+               return r;
        }
 
        r = loc_country_list_contains(list, country);
@@ -164,3 +166,12 @@ LOC_EXPORT int loc_country_list_contains_code(
 
        return r;
 }
+
+static int __loc_country_cmp(const void* country1, const void* country2) {
+       return loc_country_cmp(*(struct loc_country**)country1, *(struct loc_country**)country2);
+}
+
+LOC_EXPORT void loc_country_list_sort(struct loc_country_list* list) {
+       // Sort everything
+       qsort(list->elements, list->size, sizeof(*list->elements), __loc_country_cmp);
+}
index 3ad3aacdaee8fb642442473ff22896004a7aa68c..8152a89752d2ca9cb1eb61f59b762c31558fd494 100644 (file)
 #include <stdlib.h>
 #include <string.h>
 
-#include <loc/libloc.h>
-#include <loc/compat.h>
-#include <loc/country.h>
-#include <loc/private.h>
+#include <libloc/libloc.h>
+#include <libloc/compat.h>
+#include <libloc/country.h>
+#include <libloc/network.h>
+#include <libloc/private.h>
+
+static const struct loc_special_country {
+       const char code[3];
+       enum loc_network_flags flags;
+} loc_special_countries[] = {
+       { "A1", LOC_NETWORK_FLAG_ANONYMOUS_PROXY },
+       { "A2", LOC_NETWORK_FLAG_SATELLITE_PROVIDER },
+       { "A3", LOC_NETWORK_FLAG_ANYCAST },
+       { "XD", LOC_NETWORK_FLAG_DROP },
+       { "", 0 },
+};
 
 struct loc_country {
        struct loc_ctx* ctx;
        int refcount;
 
-       char* code;
-       char* continent_code;
+       // Store the country code in a 3 byte buffer: two bytes for the code and a
+       // terminating NUL byte so that we can use strcmp() and return a pointer.
+       char code[3];
+       char continent_code[3];
 
        char* name;
 };
@@ -42,12 +56,13 @@ LOC_EXPORT int loc_country_new(struct loc_ctx* ctx, struct loc_country** country
 
        struct loc_country* c = calloc(1, sizeof(*c));
        if (!c)
-               return -ENOMEM;
+               return 1;
 
        c->ctx = loc_ref(ctx);
        c->refcount = 1;
 
-       c->code = strdup(country_code);
+       // Set the country code
+       loc_country_code_copy(c->code, country_code);
 
        DEBUG(c->ctx, "Country %s allocated at %p\n", c->code, c);
        *country = c;
@@ -64,12 +79,6 @@ LOC_EXPORT struct loc_country* loc_country_ref(struct loc_country* country) {
 static void loc_country_free(struct loc_country* country) {
        DEBUG(country->ctx, "Releasing country %s %p\n", country->code, country);
 
-       if (country->code)
-               free(country->code);
-
-       if (country->continent_code)
-               free(country->continent_code);
-
        if (country->name)
                free(country->name);
 
@@ -82,7 +91,6 @@ LOC_EXPORT struct loc_country* loc_country_unref(struct loc_country* country) {
                return NULL;
 
        loc_country_free(country);
-
        return NULL;
 }
 
@@ -91,17 +99,21 @@ LOC_EXPORT const char* loc_country_get_code(struct loc_country* country) {
 }
 
 LOC_EXPORT const char* loc_country_get_continent_code(struct loc_country* country) {
+       if (!*country->continent_code)
+               return NULL;
+
        return country->continent_code;
 }
 
 LOC_EXPORT int loc_country_set_continent_code(struct loc_country* country, const char* continent_code) {
-       // XXX validate input
-
-       // Free previous value
-       if (country->continent_code)
-               free(country->continent_code);
+       // Check for valid input
+       if (!continent_code || strlen(continent_code) != 2) {
+               errno = EINVAL;
+               return 1;
+       }
 
-       country->continent_code = strdup(continent_code);
+       // Store the code
+       loc_country_code_copy(country->continent_code, continent_code);
 
        return 0;
 }
@@ -114,37 +126,36 @@ LOC_EXPORT int loc_country_set_name(struct loc_country* country, const char* nam
        if (country->name)
                free(country->name);
 
-       if (name)
+       if (name) {
                country->name = strdup(name);
 
+               // Report error if we could not copy the string
+               if (!country->name)
+                       return 1;
+       }
+
        return 0;
 }
 
 LOC_EXPORT int loc_country_cmp(struct loc_country* country1, struct loc_country* country2) {
-       return strcmp(country1->code, country2->code);
+       return strncmp(country1->code, country2->code, 2);
 }
 
 int loc_country_new_from_database_v1(struct loc_ctx* ctx, struct loc_stringpool* pool,
                struct loc_country** country, const struct loc_database_country_v1* dbobj) {
-       char buffer[3];
+       char buffer[3] = "XX";
 
        // Read country code
        loc_country_code_copy(buffer, dbobj->code);
 
-       // Terminate buffer
-       buffer[2] = '\0';
-
        // Create a new country object
        int r = loc_country_new(ctx, country, buffer);
        if (r)
                return r;
 
-       // Continent Code
-       loc_country_code_copy(buffer, dbobj->continent_code);
-
-       r = loc_country_set_continent_code(*country, buffer);
-       if (r)
-               goto FAIL;
+       // Copy continent code
+       if (*dbobj->continent_code)
+               loc_country_code_copy((*country)->continent_code, dbobj->continent_code);
 
        // Set name
        const char* name = loc_stringpool_get(pool, be32toh(dbobj->name));
@@ -163,20 +174,20 @@ FAIL:
 
 int loc_country_to_database_v1(struct loc_country* country,
                struct loc_stringpool* pool, struct loc_database_country_v1* dbobj) {
+       off_t name = 0;
+
        // Add country code
-       for (unsigned int i = 0; i < 2; i++) {
-               dbobj->code[i] = country->code[i] ? country->code[i] : '\0';
-       }
+       if (*country->code)
+               loc_country_code_copy(dbobj->code, country->code);
 
        // Add continent code
-       if (country->continent_code) {
-               for (unsigned int i = 0; i < 2; i++) {
-                       dbobj->continent_code[i] = country->continent_code[i] ? country->continent_code[i] : '\0';
-               }
-       }
+       if (*country->continent_code)
+               loc_country_code_copy(dbobj->continent_code, country->continent_code);
 
        // Save the name string in the string pool
-       off_t name = loc_stringpool_add(pool, country->name ? country->name : "");
+       if (country->name)
+               name = loc_stringpool_add(pool, country->name);
+
        dbobj->name = htobe32(name);
 
        return 0;
@@ -197,6 +208,27 @@ LOC_EXPORT int loc_country_code_is_valid(const char* cc) {
                        return 0;
        }
 
+       // The code cannot begin with an X (those are reserved for private use)
+       if (*cc == 'X')
+               return 0;
+
        // Looks valid
        return 1;
 }
+
+LOC_EXPORT int loc_country_special_code_to_flag(const char* cc) {
+       // Check if we got some input
+       if (!cc || !*cc) {
+               errno = EINVAL;
+               return -1;
+       }
+
+       // Return flags for any known special country
+       for (const struct loc_special_country* country = loc_special_countries;
+                       country->flags; country++) {
+               if (strncmp(country->code, cc, 2) == 0)
+                       return country->flags;
+       }
+
+       return 0;
+}
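
loc_country_special_code_to_flag() is exported, so callers can translate the reserved codes from the table above into network flags. A hedged sketch of such a caller, assuming the declaration ends up in <libloc/country.h> next to the other country helpers; the printed strings are illustrative only.

#include <stdio.h>

#include <libloc/country.h>
#include <libloc/network.h>

static void print_flag(const char* code) {
	int flag = loc_country_special_code_to_flag(code);

	if (flag < 0)
		printf("%s: invalid input\n", code);               /* errno is set to EINVAL */
	else if (flag == LOC_NETWORK_FLAG_DROP)
		printf("%s: hostile networks safe to drop\n", code);
	else if (flag)
		printf("%s: special purpose code (flag %d)\n", code, flag);
	else
		printf("%s: regular country code\n", code);
}

int main(void) {
	print_flag("XD");   /* maps to LOC_NETWORK_FLAG_DROP */
	print_flag("DE");   /* no special flag */

	return 0;
}
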
diff --git a/src/cron/location-update.in b/src/cron/location-update.in
new file mode 100644 (file)
index 0000000..232de10
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/bash
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2022 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+# Call the location database updater
+exec @bindir@/location update
index b92a803d5d3db9a8be49049ee803d7a06f6180e2..617b61eb5a8fe5966136aa54c7cc690579fa2bba 100644 (file)
 #include <openssl/evp.h>
 #include <openssl/pem.h>
 
-#include <loc/libloc.h>
-#include <loc/as.h>
-#include <loc/as-list.h>
-#include <loc/compat.h>
-#include <loc/country.h>
-#include <loc/country-list.h>
-#include <loc/database.h>
-#include <loc/format.h>
-#include <loc/network.h>
-#include <loc/private.h>
-#include <loc/stringpool.h>
+#include <libloc/libloc.h>
+#include <libloc/address.h>
+#include <libloc/as.h>
+#include <libloc/as-list.h>
+#include <libloc/compat.h>
+#include <libloc/country.h>
+#include <libloc/country-list.h>
+#include <libloc/database.h>
+#include <libloc/format.h>
+#include <libloc/network.h>
+#include <libloc/network-list.h>
+#include <libloc/private.h>
+#include <libloc/stringpool.h>
+
+struct loc_database_objects {
+       char* data;
+       size_t length;
+       size_t count;
+};
+
+struct loc_database_signature {
+       const char* data;
+       size_t length;
+};
 
 struct loc_database {
        struct loc_ctx* ctx;
@@ -61,28 +74,26 @@ struct loc_database {
        off_t license;
 
        // Signatures
-       char* signature1;
-       size_t signature1_length;
-       char* signature2;
-       size_t signature2_length;
+       struct loc_database_signature signature1;
+       struct loc_database_signature signature2;
+
+       // Data mapped into memory
+       char* data;
+       off_t length;
+
+       struct loc_stringpool* pool;
 
        // ASes in the database
-       struct loc_database_as_v1* as_v1;
-       size_t as_count;
+       struct loc_database_objects as_objects;
 
        // Network tree
-       struct loc_database_network_node_v1* network_nodes_v1;
-       size_t network_nodes_count;
+       struct loc_database_objects network_node_objects;
 
        // Networks
-       struct loc_database_network_v1* networks_v1;
-       size_t networks_count;
+       struct loc_database_objects network_objects;
 
        // Countries
-       struct loc_database_country_v1* countries_v1;
-       size_t countries_count;
-
-       struct loc_stringpool* pool;
+       struct loc_database_objects country_objects;
 };
 
 #define MAX_STACK_DEPTH 256
@@ -121,11 +132,72 @@ struct loc_database_enumerator {
        int network_stack_depth;
        unsigned int* networks_visited;
 
-       // For subnet search
+       // For subnet search and bogons
        struct loc_network_list* stack;
+       struct loc_network_list* subnets;
+
+       // For bogons
+       struct in6_addr gap6_start;
+       struct in6_addr gap4_start;
 };
 
-static int loc_database_read_magic(struct loc_database* db) {
+/*
+       Checks if it is safe to read the buffer of size length starting at p.
+*/
+#define loc_database_check_boundaries(db, p) \
+       __loc_database_check_boundaries(db, (const char*)p, sizeof(*p))
+
+static inline int __loc_database_check_boundaries(struct loc_database* db,
+               const char* p, const size_t length) {
+       size_t offset = p - db->data;
+
+       // Return if everything is within the boundary
+       if (offset <= db->length - length)
+               return 1;
+
+       DEBUG(db->ctx, "Database read check failed at %p for %zu byte(s)\n", p, length);
+       DEBUG(db->ctx, "  p      = %p (offset = %jd, length = %zu)\n", p, offset, length);
+       DEBUG(db->ctx, "  data   = %p (length = %zu)\n", db->data, db->length);
+       DEBUG(db->ctx, "  end    = %p\n", db->data + db->length);
+       DEBUG(db->ctx, "  overflow of %zu byte(s)\n", offset + length - db->length);
+
+       // Otherwise raise EFAULT
+       errno = EFAULT;
+       return 0;
+}
+
+/*
+       Returns a pointer to the n-th object
+*/
+static inline char* loc_database_object(struct loc_database* db,
+               const struct loc_database_objects* objects, const size_t length, const off_t n) {
+       // Calculate offset
+       const off_t offset = n * length;
+
+       // Return a pointer to where the object lies
+       char* object = objects->data + offset;
+
+       // Check if the object is part of the memory
+       if (!__loc_database_check_boundaries(db, object, length))
+               return NULL;
+
+       return object;
+}
+
+static int loc_database_version_supported(struct loc_database* db, uint8_t version) {
+       switch (version) {
+               // Supported versions
+               case LOC_DATABASE_VERSION_1:
+                       return 1;
+
+               default:
+                       ERROR(db->ctx, "Database version %d is not supported\n", version);
+                       errno = ENOTSUP;
+                       return 0;
+       }
+}
+
+static int loc_database_check_magic(struct loc_database* db) {
        struct loc_database_magic magic;
 
        // Read from file
@@ -135,200 +207,177 @@ static int loc_database_read_magic(struct loc_database* db) {
        if (bytes_read < sizeof(magic)) {
                ERROR(db->ctx, "Could not read enough data to validate magic bytes\n");
                DEBUG(db->ctx, "Read %zu bytes, but needed %zu\n", bytes_read, sizeof(magic));
-               return -ENOMSG;
+               goto ERROR;
        }
 
        // Compare magic bytes
-       if (memcmp(LOC_DATABASE_MAGIC, magic.magic, strlen(LOC_DATABASE_MAGIC)) == 0) {
+       if (memcmp(magic.magic, LOC_DATABASE_MAGIC, sizeof(magic.magic)) == 0) {
                DEBUG(db->ctx, "Magic value matches\n");
 
+               // Do we support this version?
+               if (!loc_database_version_supported(db, magic.version))
+                       return 1;
+
                // Parse version
                db->version = magic.version;
 
                return 0;
        }
 
+ERROR:
        ERROR(db->ctx, "Unrecognized file type\n");
+       errno = ENOMSG;
 
        // Return an error
        return 1;
 }
 
-static int loc_database_read_as_section_v1(struct loc_database* db,
-               const struct loc_database_header_v1* header) {
-       off_t as_offset  = be32toh(header->as_offset);
-       size_t as_length = be32toh(header->as_length);
-
-       DEBUG(db->ctx, "Reading AS section from %jd (%zu bytes)\n", (intmax_t)as_offset, as_length);
+/*
+       Maps the entire database into memory
+*/
+static int loc_database_mmap(struct loc_database* db) {
+       int r;
 
-       if (as_length > 0) {
-               db->as_v1 = mmap(NULL, as_length, PROT_READ,
-                       MAP_SHARED, fileno(db->f), as_offset);
+       // Get file descriptor
+       int fd = fileno(db->f);
 
-               if (db->as_v1 == MAP_FAILED)
-                       return -errno;
+       // Determine the length of the database
+       db->length = lseek(fd, 0, SEEK_END);
+       if (db->length < 0) {
+               ERROR(db->ctx, "Could not determine the length of the database: %m\n");
+               return 1;
        }
 
-       db->as_count = as_length / sizeof(*db->as_v1);
-
-       INFO(db->ctx, "Read %zu ASes from the database\n", db->as_count);
-
-       return 0;
-}
-
-static int loc_database_read_network_nodes_section_v1(struct loc_database* db,
-               const struct loc_database_header_v1* header) {
-       off_t network_nodes_offset  = be32toh(header->network_tree_offset);
-       size_t network_nodes_length = be32toh(header->network_tree_length);
-
-       DEBUG(db->ctx, "Reading network nodes section from %jd (%zu bytes)\n",
-               (intmax_t)network_nodes_offset, network_nodes_length);
-
-       if (network_nodes_length > 0) {
-               db->network_nodes_v1 = mmap(NULL, network_nodes_length, PROT_READ,
-                       MAP_SHARED, fileno(db->f), network_nodes_offset);
+       rewind(db->f);
 
-               if (db->network_nodes_v1 == MAP_FAILED)
-                       return -errno;
+       // Map all data
+       db->data = mmap(NULL, db->length, PROT_READ, MAP_SHARED, fd, 0);
+       if (db->data == MAP_FAILED) {
+               ERROR(db->ctx, "Could not map the database: %m\n");
+               db->data = NULL;
+               return 1;
        }
 
-       db->network_nodes_count = network_nodes_length / sizeof(*db->network_nodes_v1);
-
-       INFO(db->ctx, "Read %zu network nodes from the database\n", db->network_nodes_count);
-
-       return 0;
-}
-
-static int loc_database_read_networks_section_v1(struct loc_database* db,
-               const struct loc_database_header_v1* header) {
-       off_t networks_offset  = be32toh(header->network_data_offset);
-       size_t networks_length = be32toh(header->network_data_length);
-
-       DEBUG(db->ctx, "Reading networks section from %jd (%zu bytes)\n",
-               (intmax_t)networks_offset, networks_length);
+       DEBUG(db->ctx, "Mapped database of %zu byte(s) at %p\n", db->length, db->data);
 
-       if (networks_length > 0) {
-               db->networks_v1 = mmap(NULL, networks_length, PROT_READ,
-                       MAP_SHARED, fileno(db->f), networks_offset);
-
-               if (db->networks_v1 == MAP_FAILED)
-                       return -errno;
+       // Tell the system that we expect to read data randomly
+       r = madvise(db->data, db->length, MADV_RANDOM);
+       if (r) {
+               ERROR(db->ctx, "madvise() failed: %m\n");
+               return r;
        }
 
-       db->networks_count = networks_length / sizeof(*db->networks_v1);
-
-       INFO(db->ctx, "Read %zu networks from the database\n", db->networks_count);
-
        return 0;
 }
 
-static int loc_database_read_countries_section_v1(struct loc_database* db,
-               const struct loc_database_header_v1* header) {
-       off_t countries_offset  = be32toh(header->countries_offset);
-       size_t countries_length = be32toh(header->countries_length);
-
-       DEBUG(db->ctx, "Reading countries section from %jd (%zu bytes)\n",
-               (intmax_t)countries_offset, countries_length);
-
-       if (countries_length > 0) {
-               db->countries_v1 = mmap(NULL, countries_length, PROT_READ,
-                       MAP_SHARED, fileno(db->f), countries_offset);
-
-               if (db->countries_v1 == MAP_FAILED)
-                       return -errno;
-       }
-
-       db->countries_count = countries_length / sizeof(*db->countries_v1);
-
-       INFO(db->ctx, "Read %zu countries from the database\n",
-               db->countries_count);
+/*
+       Maps arbitrary objects from the database into memory.
+*/
+static int loc_database_map_objects(struct loc_database* db, struct loc_database_objects* objects,
+               const size_t size, const off_t offset, const size_t length) {
+       // Store parameters
+       objects->data   = db->data + offset;
+       objects->length = length;
+       objects->count  = objects->length / size;
 
        return 0;
 }
 
 static int loc_database_read_signature(struct loc_database* db,
-               char** dst, char* src, size_t length) {
+               struct loc_database_signature* signature, const char* data, const size_t length) {
        // Check for a plausible signature length
        if (length > LOC_SIGNATURE_MAX_LENGTH) {
                ERROR(db->ctx, "Signature too long: %zu\n", length);
-               return -EINVAL;
+               errno = EINVAL;
+               return 1;
        }
 
-       DEBUG(db->ctx, "Reading signature of %zu bytes\n", length);
+       // Store data & length
+       signature->data = data;
+       signature->length = length;
 
-       // Allocate space
-       *dst = malloc(length);
-       if (!*dst)
-               return -ENOMEM;
+       DEBUG(db->ctx, "Read signature of %zu byte(s) at %p\n",
+               signature->length, signature->data);
 
-       // Copy payload
-       memcpy(*dst, src, length);
+       hexdump(db->ctx, signature->data, signature->length);
 
        return 0;
 }
 
 static int loc_database_read_header_v1(struct loc_database* db) {
-       struct loc_database_header_v1 header;
+       const struct loc_database_header_v1* header =
+               (const struct loc_database_header_v1*)(db->data + LOC_DATABASE_MAGIC_SIZE);
        int r;
 
-       // Read from file
-       size_t size = fread(&header, 1, sizeof(header), db->f);
+       DEBUG(db->ctx, "Reading header at %p\n", header);
 
-       if (size < sizeof(header)) {
+       // Check if we can read the header
+       if (!loc_database_check_boundaries(db, header)) {
                ERROR(db->ctx, "Could not read enough data for header\n");
-               return -ENOMSG;
+               return 1;
        }
 
-       // Copy over data
-       db->created_at  = be64toh(header.created_at);
-       db->vendor      = be32toh(header.vendor);
-       db->description = be32toh(header.description);
-       db->license     = be32toh(header.license);
+       // Dump the entire header
+       hexdump(db->ctx, header, sizeof(*header));
 
-       db->signature1_length = be16toh(header.signature1_length);
-       db->signature2_length = be16toh(header.signature2_length);
+       // Copy over data
+       db->created_at  = be64toh(header->created_at);
+       db->vendor      = be32toh(header->vendor);
+       db->description = be32toh(header->description);
+       db->license     = be32toh(header->license);
 
        // Read signatures
-       if (db->signature1_length) {
-               r = loc_database_read_signature(db, &db->signature1,
-                       header.signature1, db->signature1_length);
-               if (r)
-                       return r;
-       }
+       r = loc_database_read_signature(db, &db->signature1,
+               header->signature1, be16toh(header->signature1_length));
+       if (r)
+               return r;
 
-       if (db->signature2_length) {
-               r = loc_database_read_signature(db, &db->signature2,
-                       header.signature2, db->signature2_length);
-               if (r)
-                       return r;
-       }
+       r = loc_database_read_signature(db, &db->signature2,
+               header->signature2, be16toh(header->signature2_length));
+       if (r)
+               return r;
 
-       // Open pool
-       off_t pool_offset  = be32toh(header.pool_offset);
-       size_t pool_length = be32toh(header.pool_length);
+       const char* stringpool_start = db->data + be32toh(header->pool_offset);
+       size_t stringpool_length = be32toh(header->pool_length);
 
-       r = loc_stringpool_open(db->ctx, &db->pool,
-               db->f, pool_length, pool_offset);
+       // Check if the stringpool is part of the mapped area
+       if (!__loc_database_check_boundaries(db, stringpool_start, stringpool_length))
+               return 1;
+
+       // Open the stringpool
+       r = loc_stringpool_open(db->ctx, &db->pool, stringpool_start, stringpool_length);
        if (r)
                return r;
 
-       // AS section
-       r = loc_database_read_as_section_v1(db, &header);
+       // Map AS objects
+       r = loc_database_map_objects(db, &db->as_objects,
+               sizeof(struct loc_database_as_v1),
+               be32toh(header->as_offset),
+               be32toh(header->as_length));
        if (r)
                return r;
 
-       // Network Nodes
-       r = loc_database_read_network_nodes_section_v1(db, &header);
+       // Map Network Nodes
+       r = loc_database_map_objects(db, &db->network_node_objects,
+               sizeof(struct loc_database_network_node_v1),
+               be32toh(header->network_tree_offset),
+               be32toh(header->network_tree_length));
        if (r)
                return r;
 
-       // Networks
-       r = loc_database_read_networks_section_v1(db, &header);
+       // Map Networks
+       r = loc_database_map_objects(db, &db->network_objects,
+               sizeof(struct loc_database_network_v1),
+               be32toh(header->network_data_offset),
+               be32toh(header->network_data_length));
        if (r)
                return r;
 
-       // countries
-       r = loc_database_read_countries_section_v1(db, &header);
+       // Map countries
+       r = loc_database_map_objects(db, &db->country_objects,
+               sizeof(struct loc_database_country_v1),
+               be32toh(header->countries_offset),
+               be32toh(header->countries_length));
        if (r)
                return r;
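
With the whole file mapped in one piece, every record is now reached through loc_database_object(), which range-checks the requested slot before handing out a pointer. The same access pattern in a condensed, standalone form; the types and names here are illustrative, not libloc's.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct mapping {
	const char* data;    /* start of the mmap()ed file */
	size_t length;       /* total number of mapped bytes */
};

/* Returns a pointer to the n-th fixed-size record, or NULL (with errno set)
   if the record would fall outside the mapped region. */
const char* mapped_object(const struct mapping* m, size_t record_size, size_t n) {
	// Reject offsets that would overflow before they are computed
	if (record_size && n > SIZE_MAX / record_size) {
		errno = EFAULT;
		return NULL;
	}

	size_t offset = n * record_size;

	// The record must fit entirely inside the mapped area
	if (offset > m->length || m->length - offset < record_size) {
		errno = EFAULT;
		return NULL;
	}

	return m->data + offset;
}
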
 
@@ -348,30 +397,47 @@ static int loc_database_read_header(struct loc_database* db) {
        }
 }
 
-static int loc_database_read(struct loc_database* db, FILE* f) {
-       clock_t start = clock();
-
+static int loc_database_clone_handle(struct loc_database* db, FILE* f) {
+       // Fetch the FD of the original handle
        int fd = fileno(f);
 
        // Clone file descriptor
        fd = dup(fd);
        if (!fd) {
                ERROR(db->ctx, "Could not duplicate file descriptor\n");
-               return -1;
+               return 1;
        }
 
        // Reopen the file so that we can keep our own file handle
        db->f = fdopen(fd, "r");
        if (!db->f) {
                ERROR(db->ctx, "Could not re-open database file\n");
-               return -1;
+               return 1;
        }
 
        // Rewind to the start of the file
        rewind(db->f);
 
+       return 0;
+}
+
+static int loc_database_open(struct loc_database* db, FILE* f) {
+       int r;
+
+       clock_t start = clock();
+
+       // Clone the file handle
+       r = loc_database_clone_handle(db, f);
+       if (r)
+               return r;
+
        // Read magic bytes
-       int r = loc_database_read_magic(db);
+       r = loc_database_check_magic(db);
+       if (r)
+               return r;
+
+       // Map the database into memory
+       r = loc_database_mmap(db);
        if (r)
                return r;
 
@@ -388,14 +454,44 @@ static int loc_database_read(struct loc_database* db, FILE* f) {
        return 0;
 }
 
+static void loc_database_free(struct loc_database* db) {
+       int r;
+
+       DEBUG(db->ctx, "Releasing database %p\n", db);
+
+       // Unmap the entire database
+       if (db->data) {
+               r = munmap(db->data, db->length);
+               if (r)
+                       ERROR(db->ctx, "Could not unmap the database: %m\n");
+       }
+
+       // Free the stringpool
+       if (db->pool)
+               loc_stringpool_unref(db->pool);
+
+       // Close database file
+       if (db->f)
+               fclose(db->f);
+
+       loc_unref(db->ctx);
+       free(db);
+}
+
 LOC_EXPORT int loc_database_new(struct loc_ctx* ctx, struct loc_database** database, FILE* f) {
+       struct loc_database* db = NULL;
+       int r = 1;
+
        // Fail on invalid file handle
-       if (!f)
-               return -EINVAL;
+       if (!f) {
+               errno = EINVAL;
+               return 1;
+       }
 
-       struct loc_database* db = calloc(1, sizeof(*db));
+       // Allocate the database object
+       db = calloc(1, sizeof(*db));
        if (!db)
-               return -ENOMEM;
+               goto ERROR;
 
        // Reference context
        db->ctx = loc_ref(ctx);
@@ -403,15 +499,19 @@ LOC_EXPORT int loc_database_new(struct loc_ctx* ctx, struct loc_database** datab
 
        DEBUG(db->ctx, "Database object allocated at %p\n", db);
 
-       int r = loc_database_read(db, f);
-       if (r) {
-               loc_database_unref(db);
-               return r;
-       }
+       // Try to open the database
+       r = loc_database_open(db, f);
+       if (r)
+               goto ERROR;
 
        *database = db;
-
        return 0;
+
+ERROR:
+       if (db)
+               loc_database_free(db);
+
+       return r;
 }
 
 LOC_EXPORT struct loc_database* loc_database_ref(struct loc_database* db) {
@@ -420,56 +520,6 @@ LOC_EXPORT struct loc_database* loc_database_ref(struct loc_database* db) {
        return db;
 }
 
-static void loc_database_free(struct loc_database* db) {
-       int r;
-
-       DEBUG(db->ctx, "Releasing database %p\n", db);
-
-       // Removing all ASes
-       if (db->as_v1) {
-               r = munmap(db->as_v1, db->as_count * sizeof(*db->as_v1));
-               if (r)
-                       ERROR(db->ctx, "Could not unmap AS section: %s\n", strerror(errno));
-       }
-
-       // Remove mapped network sections
-       if (db->networks_v1) {
-               r = munmap(db->networks_v1, db->networks_count * sizeof(*db->networks_v1));
-               if (r)
-                       ERROR(db->ctx, "Could not unmap networks section: %s\n", strerror(errno));
-       }
-
-       // Remove mapped network nodes section
-       if (db->network_nodes_v1) {
-               r = munmap(db->network_nodes_v1, db->network_nodes_count * sizeof(*db->network_nodes_v1));
-               if (r)
-                       ERROR(db->ctx, "Could not unmap network nodes section: %s\n", strerror(errno));
-       }
-
-       // Remove mapped countries section
-       if (db->countries_v1) {
-               r = munmap(db->countries_v1, db->countries_count * sizeof(*db->countries_v1));
-               if (r)
-                       ERROR(db->ctx, "Could not unmap countries section: %s\n", strerror(errno));
-       }
-
-       if (db->pool)
-               loc_stringpool_unref(db->pool);
-
-       // Free signature
-       if (db->signature1)
-               free(db->signature1);
-       if (db->signature2)
-               free(db->signature2);
-
-       // Close database file
-       if (db->f)
-               fclose(db->f);
-
-       loc_unref(db->ctx);
-       free(db);
-}
-
 LOC_EXPORT struct loc_database* loc_database_unref(struct loc_database* db) {
        if (--db->refcount > 0)
                return NULL;
@@ -479,8 +529,10 @@ LOC_EXPORT struct loc_database* loc_database_unref(struct loc_database* db) {
 }
 
 LOC_EXPORT int loc_database_verify(struct loc_database* db, FILE* f) {
+       size_t bytes_read = 0;
+
        // Cannot do this when no signature is available
-       if (!db->signature1 && !db->signature2) {
+       if (!db->signature1.data && !db->signature2.data) {
                DEBUG(db->ctx, "No signature available to verify\n");
                return 1;
        }
@@ -491,8 +543,8 @@ LOC_EXPORT int loc_database_verify(struct loc_database* db, FILE* f) {
        // Load public key
        EVP_PKEY* pkey = PEM_read_PUBKEY(f, NULL, NULL, NULL);
        if (!pkey) {
-               char* error = ERR_error_string(ERR_get_error(), NULL);
-               ERROR(db->ctx, "Could not parse public key: %s\n", error);
+               ERROR(db->ctx, "Could not parse public key: %s\n",
+                       ERR_error_string(ERR_get_error(), NULL));
 
                return -1;
        }
@@ -516,7 +568,12 @@ LOC_EXPORT int loc_database_verify(struct loc_database* db, FILE* f) {
 
        // Read magic
        struct loc_database_magic magic;
-       fread(&magic, 1, sizeof(magic), db->f);
+       bytes_read = fread(&magic, 1, sizeof(magic), db->f);
+       if (bytes_read < sizeof(magic)) {
+               ERROR(db->ctx, "Could not read header: %m\n");
+               r = 1;
+               goto CLEANUP;
+       }
 
        hexdump(db->ctx, &magic, sizeof(magic));
 
@@ -531,7 +588,6 @@ LOC_EXPORT int loc_database_verify(struct loc_database* db, FILE* f) {
 
        // Read the header
        struct loc_database_header_v1 header_v1;
-       size_t bytes_read;
 
        switch (db->version) {
                case LOC_DATABASE_VERSION_1:
@@ -585,43 +641,46 @@ LOC_EXPORT int loc_database_verify(struct loc_database* db, FILE* f) {
                }
        }
 
+       int sig1_valid = 0;
+       int sig2_valid = 0;
+
        // Check first signature
-       if (db->signature1) {
-               hexdump(db->ctx, db->signature1, db->signature1_length);
+       if (db->signature1.length) {
+               hexdump(db->ctx, db->signature1.data, db->signature1.length);
 
                r = EVP_DigestVerifyFinal(mdctx,
-                       (unsigned char*)db->signature1, db->signature1_length);
+                       (unsigned char*)db->signature1.data, db->signature1.length);
 
                if (r == 0) {
                        DEBUG(db->ctx, "The first signature is invalid\n");
-                       r = 1;
                } else if (r == 1) {
                        DEBUG(db->ctx, "The first signature is valid\n");
-                       r = 0;
+                       sig1_valid = 1;
                } else {
                        ERROR(db->ctx, "Error verifying the first signature: %s\n",
                                ERR_error_string(ERR_get_error(), NULL));
                        r = -1;
+                       goto CLEANUP;
                }
        }
 
        // Check second signature only when the first one was invalid
-       if (r && db->signature2) {
-               hexdump(db->ctx, db->signature2, db->signature2_length);
+       if (db->signature2.length) {
+               hexdump(db->ctx, db->signature2.data, db->signature2.length);
 
                r = EVP_DigestVerifyFinal(mdctx,
-                       (unsigned char*)db->signature2, db->signature2_length);
+                       (unsigned char*)db->signature2.data, db->signature2.length);
 
                if (r == 0) {
                        DEBUG(db->ctx, "The second signature is invalid\n");
-                       r = 1;
                } else if (r == 1) {
                        DEBUG(db->ctx, "The second signature is valid\n");
-                       r = 0;
+                       sig2_valid = 1;
                } else {
                        ERROR(db->ctx, "Error verifying the second signature: %s\n",
                                ERR_error_string(ERR_get_error(), NULL));
                        r = -1;
+                       goto CLEANUP;
                }
        }
 
@@ -629,6 +688,12 @@ LOC_EXPORT int loc_database_verify(struct loc_database* db, FILE* f) {
        INFO(db->ctx, "Signature checked in %.4fms\n",
                (double)(end - start) / CLOCKS_PER_SEC * 1000);
 
+       // Check if at least one signature was okay
+       if (sig1_valid || sig2_valid)
+               r = 0;
+       else
+               r = 1;
+
 CLEANUP:
        // Cleanup
        EVP_MD_CTX_free(mdctx);
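
Instead of reusing r, the rewritten verify path tracks each signature separately and accepts the database when at least one of them checks out. A sketch of that decision with the same OpenSSL primitive; unlike the real code above, errors from EVP_DigestVerifyFinal() are simply treated as 'not valid' here, and the digest setup is assumed to have happened already.

#include <stddef.h>
#include <openssl/evp.h>

/* Returns 0 when at least one of the two signatures verifies against the
   digest state accumulated in mdctx, 1 otherwise. */
int verify_either_signature(EVP_MD_CTX* mdctx,
		const unsigned char* sig1, size_t sig1_length,
		const unsigned char* sig2, size_t sig2_length) {
	int sig1_valid = 0;
	int sig2_valid = 0;

	// EVP_DigestVerifyFinal() returns 1 only when the signature matches
	if (sig1 && sig1_length)
		sig1_valid = (EVP_DigestVerifyFinal(mdctx, sig1, sig1_length) == 1);

	if (sig2 && sig2_length)
		sig2_valid = (EVP_DigestVerifyFinal(mdctx, sig2, sig2_length) == 1);

	// One valid signature is enough to trust the database
	return (sig1_valid || sig2_valid) ? 0 : 1;
}
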
@@ -654,29 +719,39 @@ LOC_EXPORT const char* loc_database_get_license(struct loc_database* db) {
 }
 
 LOC_EXPORT size_t loc_database_count_as(struct loc_database* db) {
-       return db->as_count;
+       return db->as_objects.count;
 }
 
 // Returns the AS at position pos
 static int loc_database_fetch_as(struct loc_database* db, struct loc_as** as, off_t pos) {
-       if ((size_t)pos >= db->as_count)
-               return -EINVAL;
+       struct loc_database_as_v1* as_v1 = NULL;
+       int r;
+
+       if ((size_t)pos >= db->as_objects.count) {
+               errno = ERANGE;
+               return 1;
+       }
 
        DEBUG(db->ctx, "Fetching AS at position %jd\n", (intmax_t)pos);
 
-       int r;
        switch (db->version) {
                case LOC_DATABASE_VERSION_1:
-                       r = loc_as_new_from_database_v1(db->ctx, db->pool, as, db->as_v1 + pos);
+                       // Find the object
+                       as_v1 = (struct loc_database_as_v1*)loc_database_object(db,
+                               &db->as_objects, sizeof(*as_v1), pos);
+                       if (!as_v1)
+                               return 1;
+
+                       r = loc_as_new_from_database_v1(db->ctx, db->pool, as, as_v1);
                        break;
 
                default:
-                       return -1;
+                       errno = ENOTSUP;
+                       return 1;
        }
 
-       if (r == 0) {
+       if (r == 0)
                DEBUG(db->ctx, "Got AS%u\n", loc_as_get_number(*as));
-       }
 
        return r;
 }
@@ -684,7 +759,7 @@ static int loc_database_fetch_as(struct loc_database* db, struct loc_as** as, of
 // Performs a binary search to find the AS in the list
 LOC_EXPORT int loc_database_get_as(struct loc_database* db, struct loc_as** as, uint32_t number) {
        off_t lo = 0;
-       off_t hi = db->as_count - 1;
+       off_t hi = db->as_objects.count - 1;
 
 #ifdef ENABLE_DEBUG
        // Save start time
@@ -732,33 +807,36 @@ LOC_EXPORT int loc_database_get_as(struct loc_database* db, struct loc_as** as,
 // Returns the network at position pos
 static int loc_database_fetch_network(struct loc_database* db, struct loc_network** network,
                struct in6_addr* address, unsigned int prefix, off_t pos) {
-       if ((size_t)pos >= db->networks_count) {
+       struct loc_database_network_v1* network_v1 = NULL;
+       int r;
+
+       if ((size_t)pos >= db->network_objects.count) {
                DEBUG(db->ctx, "Network ID out of range: %jd/%jd\n",
-                       (intmax_t)pos, (intmax_t)db->networks_count);
-               return -EINVAL;
+                       (intmax_t)pos, (intmax_t)db->network_objects.count);
+               errno = ERANGE;
+               return 1;
        }
 
-
        DEBUG(db->ctx, "Fetching network at position %jd\n", (intmax_t)pos);
 
-       int r;
        switch (db->version) {
                case LOC_DATABASE_VERSION_1:
-                       r = loc_network_new_from_database_v1(db->ctx, network,
-                               address, prefix, db->networks_v1 + pos);
+                       // Read the object
+                       network_v1 = (struct loc_database_network_v1*)loc_database_object(db,
+                               &db->network_objects, sizeof(*network_v1), pos);
+                       if (!network_v1)
+                               return 1;
+
+                       r = loc_network_new_from_database_v1(db->ctx, network, address, prefix, network_v1);
                        break;
 
                default:
-                       return -1;
+                       errno = ENOTSUP;
+                       return 1;
        }
 
-#ifdef ENABLE_DEBUG
-       if (r == 0) {
-               char* string = loc_network_str(*network);
-               DEBUG(db->ctx, "Got network %s\n", string);
-               free(string);
-       }
-#endif
+       if (r == 0)
+               DEBUG(db->ctx, "Got network %s\n", loc_network_str(*network));
 
        return r;
 }
@@ -772,18 +850,18 @@ static int __loc_database_lookup_handle_leaf(struct loc_database* db, const stru
                const struct loc_database_network_node_v1* node) {
        off_t network_index = be32toh(node->network);
 
-       DEBUG(db->ctx, "Handling leaf node at %jd (%jd)\n", (intmax_t)(node - db->network_nodes_v1), (intmax_t)network_index);
+       DEBUG(db->ctx, "Handling leaf node at %jd\n", (intmax_t)network_index);
 
        // Fetch the network
-       int r = loc_database_fetch_network(db, network,
-               network_address, prefix, network_index);
+       int r = loc_database_fetch_network(db, network, network_address, prefix, network_index);
        if (r) {
-               ERROR(db->ctx, "Could not fetch network %jd from database\n", (intmax_t)network_index);
+               ERROR(db->ctx, "Could not fetch network %jd from database: %m\n",
+                       (intmax_t)network_index);
                return r;
        }
 
        // Check if the given IP address is inside the network
-       if (!loc_network_match_address(*network, address)) {
+       if (!loc_network_matches_address(*network, address)) {
                DEBUG(db->ctx, "Searched address is not part of the network\n");
 
                loc_network_unref(*network);
@@ -798,29 +876,37 @@ static int __loc_database_lookup_handle_leaf(struct loc_database* db, const stru
 // Searches for an exact match along the path
 static int __loc_database_lookup(struct loc_database* db, const struct in6_addr* address,
                struct loc_network** network, struct in6_addr* network_address,
-               const struct loc_database_network_node_v1* node, unsigned int level) {
+               off_t node_index, unsigned int level) {
+       struct loc_database_network_node_v1* node_v1 = NULL;
+
        int r;
-       off_t node_index;
+
+       // Fetch the next node
+       node_v1 = (struct loc_database_network_node_v1*)loc_database_object(db,
+               &db->network_node_objects, sizeof(*node_v1), node_index);
+       if (!node_v1)
+               return 1;
 
        // Follow the path
-       int bit = in6_addr_get_bit(address, level);
-       in6_addr_set_bit(network_address, level, bit);
+       int bit = loc_address_get_bit(address, level);
+       loc_address_set_bit(network_address, level, bit);
 
        if (bit == 0)
-               node_index = be32toh(node->zero);
+               node_index = be32toh(node_v1->zero);
        else
-               node_index = be32toh(node->one);
+               node_index = be32toh(node_v1->one);
 
        // If the node index is zero, the tree ends here
        // and we cannot descend any further
        if (node_index > 0) {
                // Check boundaries
-               if ((size_t)node_index >= db->network_nodes_count)
-                       return -EINVAL;
+               if ((size_t)node_index >= db->network_node_objects.count) {
+                       errno = ERANGE;
+                       return 1;
+               }
 
                // Move on to the next node
-               r = __loc_database_lookup(db, address, network, network_address,
-                       db->network_nodes_v1 + node_index, level + 1);
+               r = __loc_database_lookup(db, address, network, network_address, node_index, level + 1);
 
                // End here if a result was found
                if (r == 0)
@@ -836,8 +922,8 @@ static int __loc_database_lookup(struct loc_database* db, const struct in6_addr*
        }
 
        // If this node has a leaf, we will check if it matches
-       if (__loc_database_node_is_leaf(node)) {
-               r = __loc_database_lookup_handle_leaf(db, address, network, network_address, level, node);
+       if (__loc_database_node_is_leaf(node_v1)) {
+               r = __loc_database_lookup_handle_leaf(db, address, network, network_address, level, node_v1);
                if (r <= 0)
                        return r;
        }
@@ -846,7 +932,7 @@ static int __loc_database_lookup(struct loc_database* db, const struct in6_addr*
 }
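
The recursive lookup now walks the trie by node index instead of by pointer, but the descent itself is unchanged: loc_address_get_bit() (defined in the new address.h further down) reads one address bit per level and selects either the zero or the one child. A standalone illustration of that bit walk on a concrete address; this is not libloc code, just the same bit arithmetic:

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    /* Same bit extraction as loc_address_get_bit() in address.h */
    static int get_bit(const struct in6_addr* address, unsigned int i) {
            return (address->s6_addr[i / 8] >> (7 - (i % 8))) & 1;
    }

    int main(void) {
            struct in6_addr address;

            if (inet_pton(AF_INET6, "2001:db8::1", &address) != 1)
                    return 1;

            /* The first 16 bits (0x2001) drive the first 16 levels of the trie:
               0010 0000 0000 0001 */
            for (unsigned int level = 0; level < 16; level++)
                    printf("%d", get_bit(&address, level));
            printf("\n");

            return 0;
    }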
 
 LOC_EXPORT int loc_database_lookup(struct loc_database* db,
-               struct in6_addr* address, struct loc_network** network) {
+               const struct in6_addr* address, struct loc_network** network) {
        struct in6_addr network_address;
        memset(&network_address, 0, sizeof(network_address));
 
@@ -857,8 +943,7 @@ LOC_EXPORT int loc_database_lookup(struct loc_database* db,
        clock_t start = clock();
 #endif
 
-       int r = __loc_database_lookup(db, address, network, &network_address,
-               db->network_nodes_v1, 0);
+       int r = __loc_database_lookup(db, address, network, &network_address, 0, 0);
 
 #ifdef ENABLE_DEBUG
        clock_t end = clock();
@@ -875,7 +960,7 @@ LOC_EXPORT int loc_database_lookup_from_string(struct loc_database* db,
                const char* string, struct loc_network** network) {
        struct in6_addr address;
 
-       int r = loc_parse_address(db->ctx, string, &address);
+       int r = loc_address_parse(&address, NULL, string);
        if (r)
                return r;
 
@@ -885,24 +970,35 @@ LOC_EXPORT int loc_database_lookup_from_string(struct loc_database* db,
 // Returns the country at position pos
 static int loc_database_fetch_country(struct loc_database* db,
                struct loc_country** country, off_t pos) {
-       if ((size_t)pos >= db->countries_count)
-               return -EINVAL;
+       struct loc_database_country_v1* country_v1 = NULL;
+       int r;
+
+       // Check if the country is within range
+       if ((size_t)pos >= db->country_objects.count) {
+               errno = ERANGE;
+               return 1;
+       }
 
        DEBUG(db->ctx, "Fetching country at position %jd\n", (intmax_t)pos);
 
-       int r;
        switch (db->version) {
                case LOC_DATABASE_VERSION_1:
-                       r = loc_country_new_from_database_v1(db->ctx, db->pool, country, db->countries_v1 + pos);
+                       // Read the object
+                       country_v1 = (struct loc_database_country_v1*)loc_database_object(db,
+                               &db->country_objects, sizeof(*country_v1), pos);
+                       if (!country_v1)
+                               return 1;
+
+                       r = loc_country_new_from_database_v1(db->ctx, db->pool, country, country_v1);
                        break;
 
                default:
-                       return -1;
+                       errno = ENOTSUP;
+                       return 1;
        }
 
-       if (r == 0) {
+       if (r == 0)
                DEBUG(db->ctx, "Got country %s\n", loc_country_get_code(*country));
-       }
 
        return r;
 }
@@ -911,7 +1007,13 @@ static int loc_database_fetch_country(struct loc_database* db,
 LOC_EXPORT int loc_database_get_country(struct loc_database* db,
                struct loc_country** country, const char* code) {
        off_t lo = 0;
-       off_t hi = db->countries_count - 1;
+       off_t hi = db->country_objects.count - 1;
+
+       // Check if the country code is valid
+       if (!loc_country_code_is_valid(code)) {
+               errno = EINVAL;
+               return 1;
+       }
 
 #ifdef ENABLE_DEBUG
        // Save start time
@@ -955,7 +1057,7 @@ LOC_EXPORT int loc_database_get_country(struct loc_database* db,
        // Nothing found
        *country = NULL;
 
-       return 1;
+       return 0;
 }
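
Note the changed contract of loc_database_get_country() in this hunk: an invalid country code (or a lower-level fetch error) now returns 1 with errno set, while "not found" returns 0 and leaves *country set to NULL, where it used to return 1. A caller-side sketch of the new convention, assuming an already opened struct loc_database* db and that loc_country_unref() behaves as elsewhere in the library:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #include <libloc/country.h>
    #include <libloc/database.h>

    static void show_country(struct loc_database* db, const char* code) {
            struct loc_country* country = NULL;

            /* Hard error: errno carries the reason (e.g. EINVAL for a bad code) */
            if (loc_database_get_country(db, &country, code)) {
                    fprintf(stderr, "Lookup failed: %s\n", strerror(errno));
                    return;
            }

            /* Success, but the database has no entry for this code */
            if (!country) {
                    printf("%s: not found\n", code);
                    return;
            }

            printf("Found %s\n", loc_country_get_code(country));
            loc_country_unref(country);
    }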
 
 // Enumerator
@@ -977,20 +1079,27 @@ static void loc_database_enumerator_free(struct loc_database_enumerator* enumera
                loc_as_list_unref(enumerator->asns);
 
        // Free network search
-       free(enumerator->networks_visited);
+       if (enumerator->networks_visited)
+               free(enumerator->networks_visited);
 
-       // Free subnet stack
+       // Free subnet/bogons stack
        if (enumerator->stack)
                loc_network_list_unref(enumerator->stack);
 
+       if (enumerator->subnets)
+               loc_network_list_unref(enumerator->subnets);
+
        free(enumerator);
 }
 
 LOC_EXPORT int loc_database_enumerator_new(struct loc_database_enumerator** enumerator,
                struct loc_database* db, enum loc_database_enumerator_mode mode, int flags) {
+       int r;
+
        struct loc_database_enumerator* e = calloc(1, sizeof(*e));
-       if (!e)
+       if (!e) {
                return -ENOMEM;
+       }
 
        // Reference context
        e->ctx = loc_ref(db->ctx);
@@ -1003,19 +1112,32 @@ LOC_EXPORT int loc_database_enumerator_new(struct loc_database_enumerator** enum
 
        // Initialise graph search
        e->network_stack_depth = 1;
-       e->networks_visited = calloc(db->network_nodes_count, sizeof(*e->networks_visited));
+       e->networks_visited = calloc(db->network_node_objects.count, sizeof(*e->networks_visited));
+       if (!e->networks_visited) {
+               ERROR(db->ctx, "Could not allocate visited networks: %m\n");
+               r = 1;
+               goto ERROR;
+       }
 
        // Allocate stack
-       int r = loc_network_list_new(e->ctx, &e->stack);
-       if (r) {
-               loc_database_enumerator_free(e);
-               return r;
-       }
+       r = loc_network_list_new(e->ctx, &e->stack);
+       if (r)
+               goto ERROR;
+
+       // Initialize bogon search
+       loc_address_reset(&e->gap6_start, AF_INET6);
+       loc_address_reset(&e->gap4_start, AF_INET);
 
        DEBUG(e->ctx, "Database enumerator object allocated at %p\n", e);
 
        *enumerator = e;
        return 0;
+
+ERROR:
+       if (e)
+               loc_database_enumerator_free(e);
+
+       return r;
 }
 
 LOC_EXPORT struct loc_database_enumerator* loc_database_enumerator_ref(struct loc_database_enumerator* enumerator) {
@@ -1105,7 +1227,7 @@ LOC_EXPORT int loc_database_enumerator_next_as(
 
        struct loc_database* db = enumerator->db;
 
-       while (enumerator->as_index < db->as_count) {
+       while (enumerator->as_index < db->as_objects.count) {
                // Fetch the next AS
                int r = loc_database_fetch_as(db, as, enumerator->as_index++);
                if (r)
@@ -1140,7 +1262,15 @@ static int loc_database_enumerator_stack_push_node(
        // Check if there is any space left on the stack
        if (e->network_stack_depth >= MAX_STACK_DEPTH) {
                ERROR(e->ctx, "Maximum stack size reached: %d\n", e->network_stack_depth);
-               return -1;
+               return 1;
+       }
+
+       // Check if the node is in range
+       if (offset >= (off_t)e->db->network_node_objects.count) {
+               ERROR(e->ctx, "Trying to add invalid node with offset %jd/%zu\n",
+                       offset, e->db->network_node_objects.count);
+               errno = ERANGE;
+               return 1;
        }
 
        // Increase stack size
@@ -1155,41 +1285,45 @@ static int loc_database_enumerator_stack_push_node(
        return 0;
 }
 
-static int loc_database_enumerator_filter_network(
+static int loc_database_enumerator_match_network(
                struct loc_database_enumerator* enumerator, struct loc_network* network) {
-       // Skip if the family does not match
+       // If family is set, it must match
        if (enumerator->family && loc_network_address_family(network) != enumerator->family) {
                DEBUG(enumerator->ctx, "Filtered network %p because of family not matching\n", network);
-               return 1;
+               return 0;
        }
 
-       // Skip if the country code does not match
+       // Match if no filter criteria is configured
+       if (!enumerator->countries && !enumerator->asns && !enumerator->flags)
+               return 1;
+
+       // Check if the country code matches
        if (enumerator->countries && !loc_country_list_empty(enumerator->countries)) {
                const char* country_code = loc_network_get_country_code(network);
 
-               if (!loc_country_list_contains_code(enumerator->countries, country_code)) {
-                       DEBUG(enumerator->ctx, "Filtered network %p because of country code not matching\n", network);
+               if (loc_country_list_contains_code(enumerator->countries, country_code)) {
+                       DEBUG(enumerator->ctx, "Matched network %p because of its country code\n", network);
                        return 1;
                }
        }
 
-       // Skip if the ASN does not match
+       // Check if the ASN matches
        if (enumerator->asns && !loc_as_list_empty(enumerator->asns)) {
                uint32_t asn = loc_network_get_asn(network);
 
-               if (!loc_as_list_contains_number(enumerator->asns, asn)) {
-                       DEBUG(enumerator->ctx, "Filtered network %p because of ASN not matching\n", network);
+               if (loc_as_list_contains_number(enumerator->asns, asn)) {
+                       DEBUG(enumerator->ctx, "Matched network %p because of its ASN\n", network);
                        return 1;
                }
        }
 
-       // Skip if flags do not match
-       if (enumerator->flags && !loc_network_match_flag(network, enumerator->flags)) {
-               DEBUG(enumerator->ctx, "Filtered network %p because of flags not matching\n", network);
+       // Check if flags match
+       if (enumerator->flags && loc_network_has_flag(network, enumerator->flags)) {
+               DEBUG(enumerator->ctx, "Matched network %p because of its flags\n", network);
                return 1;
        }
 
-       // Do not filter
+       // Not a match
        return 0;
 }
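
The helper above was not just renamed from loc_database_enumerator_filter_network() to loc_database_enumerator_match_network(); its semantics were inverted. The old version returned 1 to drop a network as soon as any configured criterion failed (criteria were effectively ANDed), while the new version returns 1 as soon as any configured criterion (country code, ASN or flag) matches, after the mandatory address-family check. A simplified stand-alone model of the new behaviour; the names are illustrative, not libloc API:

    /* Returns 1 if any configured criterion matches, or if none is configured */
    static int matches(int want_country, int country_ok,
                    int want_asn, int asn_ok,
                    int want_flag, int flag_ok) {
            /* No criteria configured: everything matches */
            if (!want_country && !want_asn && !want_flag)
                    return 1;

            /* Any single matching criterion is enough */
            if (want_country && country_ok)
                    return 1;
            if (want_asn && asn_ok)
                    return 1;
            if (want_flag && flag_ok)
                    return 1;

            return 0;
    }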
 
@@ -1203,15 +1337,13 @@ static int __loc_database_enumerator_next_network(
                if (!*network)
                        break;
 
-               // Throw away any networks by filter
-               if (filter && loc_database_enumerator_filter_network(enumerator, *network)) {
-                       loc_network_unref(*network);
-                       *network = NULL;
-                       continue;
-               }
+               // Return everything if filter isn't enabled, or only return matches
+               if (!filter || loc_database_enumerator_match_network(enumerator, *network))
+                       return 0;
 
-               // Return result
-               return 0;
+               // Throw away anything that doesn't match
+               loc_network_unref(*network);
+               *network = NULL;
        }
 
        DEBUG(enumerator->ctx, "Called with a stack of %u nodes\n",
@@ -1224,6 +1356,8 @@ static int __loc_database_enumerator_next_network(
                // Get object from top of the stack
                struct loc_node_stack* node = &enumerator->network_stack[enumerator->network_stack_depth];
 
+               DEBUG(enumerator->ctx, "  Got node: %jd\n", node->offset);
+
                // Remove the node from the stack if we have already visited it
                if (enumerator->networks_visited[node->offset]) {
                        enumerator->network_stack_depth--;
@@ -1231,7 +1365,7 @@ static int __loc_database_enumerator_next_network(
                }
 
                // Mark the bits on the path correctly
-               in6_addr_set_bit(&enumerator->network_address,
+               loc_address_set_bit(&enumerator->network_address,
                        (node->depth > 0) ? node->depth - 1 : 0, node->i);
 
                DEBUG(enumerator->ctx, "Looking at node %jd\n", (intmax_t)node->offset);
@@ -1239,18 +1373,19 @@ static int __loc_database_enumerator_next_network(
 
                // Pop node from top of the stack
                struct loc_database_network_node_v1* n =
-                       enumerator->db->network_nodes_v1 + node->offset;
+                       (struct loc_database_network_node_v1*)loc_database_object(enumerator->db,
+                               &enumerator->db->network_node_objects, sizeof(*n), node->offset);
+               if (!n)
+                       return 1;
 
                // Add edges to stack
                int r = loc_database_enumerator_stack_push_node(enumerator,
                        be32toh(n->one), 1, node->depth + 1);
-
                if (r)
                        return r;
 
                r = loc_database_enumerator_stack_push_node(enumerator,
                        be32toh(n->zero), 0, node->depth + 1);
-
                if (r)
                        return r;
 
@@ -1268,19 +1403,13 @@ static int __loc_database_enumerator_next_network(
                        if (r)
                                return r;
 
-                       // Return all networks when the filter is disabled
-                       if (!filter)
+                       // Return all networks when the filter is disabled, or check for match
+                       if (!filter || loc_database_enumerator_match_network(enumerator, *network))
                                return 0;
 
-                       // Check if we are interested in this network
-                       if (loc_database_enumerator_filter_network(enumerator, *network)) {
-                               loc_network_unref(*network);
-                               *network = NULL;
-
-                               continue;
-                       }
-
-                       return 0;
+                       // Does not seem to be a match, so we cleanup and move on
+                       loc_network_unref(*network);
+                       *network = NULL;
                }
        }
 
@@ -1300,12 +1429,13 @@ static int __loc_database_enumerator_next_network_flattened(
                return 0;
 
        struct loc_network* subnet = NULL;
-       struct loc_network_list* subnets;
 
        // Create a list with all subnets
-       r = loc_network_list_new(enumerator->ctx, &subnets);
-       if (r)
-               return r;
+       if (!enumerator->subnets) {
+               r = loc_network_list_new(enumerator->ctx, &enumerator->subnets);
+               if (r)
+                       return r;
+       }
 
        // Search all subnets from the database
        while (1) {
@@ -1313,7 +1443,7 @@ static int __loc_database_enumerator_next_network_flattened(
                r = __loc_database_enumerator_next_network(enumerator, &subnet, 0);
                if (r) {
                        loc_network_unref(subnet);
-                       loc_network_list_unref(subnets);
+                       loc_network_list_clear(enumerator->subnets);
 
                        return r;
                }
@@ -1324,10 +1454,10 @@ static int __loc_database_enumerator_next_network_flattened(
 
                // Collect all subnets in a list
                if (loc_network_is_subnet(*network, subnet)) {
-                       r = loc_network_list_push(subnets, subnet);
+                       r = loc_network_list_push(enumerator->subnets, subnet);
                        if (r) {
                                loc_network_unref(subnet);
-                               loc_network_list_unref(subnets);
+                               loc_network_list_clear(enumerator->subnets);
 
                                return r;
                        }
@@ -1340,7 +1470,7 @@ static int __loc_database_enumerator_next_network_flattened(
                r = loc_network_list_push(enumerator->stack, subnet);
                if (r) {
                        loc_network_unref(subnet);
-                       loc_network_list_unref(subnets);
+                       loc_network_list_clear(enumerator->subnets);
 
                        return r;
                }
@@ -1349,27 +1479,28 @@ static int __loc_database_enumerator_next_network_flattened(
                break;
        }
 
-       DEBUG(enumerator->ctx, "Found %zu subnet(s)\n", loc_network_list_size(subnets));
+       DEBUG(enumerator->ctx, "Found %zu subnet(s)\n",
+               loc_network_list_size(enumerator->subnets));
 
        // We can abort here if the network has no subnets
-       if (loc_network_list_empty(subnets)) {
-               loc_network_list_unref(subnets);
+       if (loc_network_list_empty(enumerator->subnets)) {
+               loc_network_list_clear(enumerator->subnets);
 
                return 0;
        }
 
        // If the network has any subnets, we will break it into smaller parts
        // without the subnets.
-       struct loc_network_list* excluded = loc_network_exclude_list(*network, subnets);
+       struct loc_network_list* excluded = loc_network_exclude_list(*network, enumerator->subnets);
        if (!excluded) {
-               loc_network_list_unref(subnets);
-               return -1;
+               loc_network_list_clear(enumerator->subnets);
+               return 1;
        }
 
        // Merge subnets onto the stack
-       r = loc_network_list_merge(enumerator->stack, subnets);
+       r = loc_network_list_merge(enumerator->stack, enumerator->subnets);
        if (r) {
-               loc_network_list_unref(subnets);
+               loc_network_list_clear(enumerator->subnets);
                loc_network_list_unref(excluded);
 
                return r;
@@ -1378,13 +1509,13 @@ static int __loc_database_enumerator_next_network_flattened(
        // Push excluded list onto the stack
        r = loc_network_list_merge(enumerator->stack, excluded);
        if (r) {
-               loc_network_list_unref(subnets);
+               loc_network_list_clear(enumerator->subnets);
                loc_network_list_unref(excluded);
 
                return r;
        }
 
-       loc_network_list_unref(subnets);
+       loc_network_list_clear(enumerator->subnets);
        loc_network_list_unref(excluded);
 
        // Drop the network and restart the whole process again to pick the next network
@@ -1393,17 +1524,162 @@ static int __loc_database_enumerator_next_network_flattened(
        return __loc_database_enumerator_next_network_flattened(enumerator, network);
 }
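
The flattening pass above splits a network into the pieces not covered by its subnets via loc_network_exclude_list(). The same primitive can be exercised in isolation with the two-network variant loc_network_exclude() that appears in the network.h hunk further down; the sketch below assumes its usual reference-counting behaviour (the list getter returns a reference) and trims error handling. Excluding 10.0.0.0/24 from 10.0.0.0/23 should leave exactly 10.0.1.0/24:

    #include <stdio.h>

    #include <libloc/libloc.h>
    #include <libloc/network.h>
    #include <libloc/network-list.h>

    int main(void) {
            struct loc_ctx* ctx = NULL;
            struct loc_network* network = NULL;
            struct loc_network* subnet = NULL;

            if (loc_new(&ctx))
                    return 1;

            loc_network_new_from_string(ctx, &network, "10.0.0.0/23");
            loc_network_new_from_string(ctx, &subnet,  "10.0.0.0/24");

            /* Everything in network that is not covered by subnet */
            struct loc_network_list* rest = loc_network_exclude(network, subnet);
            if (rest) {
                    for (size_t i = 0; i < loc_network_list_size(rest); i++) {
                            struct loc_network* n = loc_network_list_get(rest, i);
                            printf("%s\n", loc_network_str(n)); /* expected: 10.0.1.0/24 */
                            loc_network_unref(n);
                    }

                    loc_network_list_unref(rest);
            }

            loc_network_unref(subnet);
            loc_network_unref(network);
            loc_unref(ctx);

            return 0;
    }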
 
+/*
+       This function finds all bogons (i.e. gaps) between the input networks
+*/
+static int __loc_database_enumerator_next_bogon(
+               struct loc_database_enumerator* enumerator, struct loc_network** bogon) {
+       int r;
+
+       // Return top element from the stack
+       while (1) {
+               *bogon = loc_network_list_pop_first(enumerator->stack);
+
+               // Stack is empty
+               if (!*bogon)
+                       break;
+
+               // Return result
+               return 0;
+       }
+
+       struct loc_network* network = NULL;
+       struct in6_addr* gap_start = NULL;
+       struct in6_addr gap_end = IN6ADDR_ANY_INIT;
+
+       while (1) {
+               r = __loc_database_enumerator_next_network(enumerator, &network, 1);
+               if (r)
+                       return r;
+
+               // We have read the last network
+               if (!network)
+                       goto FINISH;
+
+               const char* country_code = loc_network_get_country_code(network);
+
+               /*
+                       Skip anything that does not have a country code
+
+                       Even if a network is part of the routing table, and the database provides
+                       an ASN, this does not mean that this is a legitimate announcement.
+               */
+               if (country_code && !*country_code) {
+                       loc_network_unref(network);
+                       continue;
+               }
+
+               // Determine the network family
+               int family = loc_network_address_family(network);
+
+               switch (family) {
+                       case AF_INET6:
+                               gap_start = &enumerator->gap6_start;
+                               break;
+
+                       case AF_INET:
+                               gap_start = &enumerator->gap4_start;
+                               break;
+
+                       default:
+                               ERROR(enumerator->ctx, "Unsupported network family %d\n", family);
+                               errno = ENOTSUP;
+                               return 1;
+               }
+
+               const struct in6_addr* first_address = loc_network_get_first_address(network);
+               const struct in6_addr* last_address = loc_network_get_last_address(network);
+
+               // Skip if this network is a subnet of a former one
+               if (loc_address_cmp(gap_start, last_address) >= 0) {
+                       loc_network_unref(network);
+                       continue;
+               }
+
+               // Search where the gap could end
+               gap_end = *first_address;
+               loc_address_decrement(&gap_end);
+
+               // There is a gap
+               if (loc_address_cmp(gap_start, &gap_end) <= 0) {
+                       r = loc_network_list_summarize(enumerator->ctx,
+                               gap_start, &gap_end, &enumerator->stack);
+                       if (r) {
+                               loc_network_unref(network);
+                               return r;
+                       }
+               }
+
+               // The gap now starts after this network
+               *gap_start = *last_address;
+               loc_address_increment(gap_start);
+
+               loc_network_unref(network);
+
+               // Try to return something
+               *bogon = loc_network_list_pop_first(enumerator->stack);
+               if (*bogon)
+                       break;
+       }
+
+       return 0;
+
+FINISH:
+
+       if (!loc_address_all_zeroes(&enumerator->gap6_start)) {
+               r = loc_address_reset_last(&gap_end, AF_INET6);
+               if (r)
+                       return r;
+
+               if (loc_address_cmp(&enumerator->gap6_start, &gap_end) <= 0) {
+                       r = loc_network_list_summarize(enumerator->ctx,
+                               &enumerator->gap6_start, &gap_end, &enumerator->stack);
+                       if (r)
+                               return r;
+               }
+
+               // Reset start
+               loc_address_reset(&enumerator->gap6_start, AF_INET6);
+       }
+
+       if (!loc_address_all_zeroes(&enumerator->gap4_start)) {
+               r = loc_address_reset_last(&gap_end, AF_INET);
+               if (r)
+                       return r;
+
+               if (loc_address_cmp(&enumerator->gap4_start, &gap_end) <= 0) {
+                       r = loc_network_list_summarize(enumerator->ctx,
+                               &enumerator->gap4_start, &gap_end, &enumerator->stack);
+                       if (r)
+                               return r;
+               }
+
+               // Reset start
+               loc_address_reset(&enumerator->gap4_start, AF_INET);
+       }
+
+       // Try to return something
+       *bogon = loc_network_list_pop_first(enumerator->stack);
+
+       return 0;
+}
+
 LOC_EXPORT int loc_database_enumerator_next_network(
                struct loc_database_enumerator* enumerator, struct loc_network** network) {
-       // Do not do anything if not in network mode
-       if (enumerator->mode != LOC_DB_ENUMERATE_NETWORKS)
-               return 0;
+       switch (enumerator->mode) {
+               case LOC_DB_ENUMERATE_NETWORKS:
+                       // Flatten output?
+                       if (enumerator->flatten)
+                               return __loc_database_enumerator_next_network_flattened(enumerator, network);
 
-       // Flatten output?
-       if (enumerator->flatten)
-               return __loc_database_enumerator_next_network_flattened(enumerator, network);
+                       return __loc_database_enumerator_next_network(enumerator, network, 1);
+
+               case LOC_DB_ENUMERATE_BOGONS:
+                       return __loc_database_enumerator_next_bogon(enumerator, network);
 
-       return __loc_database_enumerator_next_network(enumerator, network, 1);
+               default:
+                       return 0;
+       }
 }
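
With the new LOC_DB_ENUMERATE_BOGONS mode, the same loc_database_enumerator_next_network() entry point yields the gaps between all announced networks instead of the networks themselves: if the database contains 1.0.0.0/24 and 1.0.2.0/24 and nothing in between, the enumerator returns 1.0.1.0/24 for that gap. A usage sketch with error handling trimmed; loc_database_enumerator_unref() is assumed to exist alongside the _ref() shown above, and loc_network_str() returns a const string per the header change further down:

    #include <stdio.h>

    #include <libloc/database.h>
    #include <libloc/network.h>

    static int dump_bogons(struct loc_database* db) {
            struct loc_database_enumerator* e = NULL;
            struct loc_network* bogon = NULL;

            int r = loc_database_enumerator_new(&e, db, LOC_DB_ENUMERATE_BOGONS, 0);
            if (r)
                    return r;

            /* Each call returns the next gap; a NULL network marks the end */
            for (;;) {
                    r = loc_database_enumerator_next_network(e, &bogon);
                    if (r || !bogon)
                            break;

                    printf("%s\n", loc_network_str(bogon));
                    loc_network_unref(bogon);
            }

            loc_database_enumerator_unref(e);

            return r;
    }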
 
 LOC_EXPORT int loc_database_enumerator_next_country(
@@ -1416,7 +1692,7 @@ LOC_EXPORT int loc_database_enumerator_next_country(
 
        struct loc_database* db = enumerator->db;
 
-       while (enumerator->country_index < db->countries_count) {
+       while (enumerator->country_index < db->country_objects.count) {
                // Fetch the next country
                int r = loc_database_fetch_country(db, country, enumerator->country_index++);
                if (r)
index 0f2227994284e9a3ce2b2a24d8559cdd905df8ff..450c5e6fb4df341ac61e72f20ee956bc72b01e75 100644 (file)
 #include <stddef.h>
 #include <stdarg.h>
 #include <unistd.h>
-#include <errno.h>
 #include <string.h>
 #include <ctype.h>
 
-#include <loc/libloc.h>
-#include <loc/compat.h>
-#include <loc/private.h>
+#include <libloc/libloc.h>
+#include <libloc/compat.h>
+#include <libloc/private.h>
 
 struct loc_ctx {
        int refcount;
@@ -77,7 +76,7 @@ static int log_priority(const char* priority) {
 LOC_EXPORT int loc_new(struct loc_ctx** ctx) {
        struct loc_ctx* c = calloc(1, sizeof(*c));
        if (!c)
-               return -ENOMEM;
+               return 1;
 
        c->refcount = 1;
        c->log_fn = log_stderr;
@@ -104,9 +103,6 @@ LOC_EXPORT struct loc_ctx* loc_ref(struct loc_ctx* ctx) {
 }
 
 LOC_EXPORT struct loc_ctx* loc_unref(struct loc_ctx* ctx) {
-       if (!ctx)
-               return NULL;
-
        if (--ctx->refcount > 0)
                return NULL;
 
@@ -130,34 +126,3 @@ LOC_EXPORT int loc_get_log_priority(struct loc_ctx* ctx) {
 LOC_EXPORT void loc_set_log_priority(struct loc_ctx* ctx, int priority) {
        ctx->log_priority = priority;
 }
-
-LOC_EXPORT int loc_parse_address(struct loc_ctx* ctx, const char* string, struct in6_addr* address) {
-       DEBUG(ctx, "Parsing IP address %s\n", string);
-
-       // Try parsing this as an IPv6 address
-       int r = inet_pton(AF_INET6, string, address);
-
-       // If inet_pton returns one it has been successful
-       if (r == 1) {
-               DEBUG(ctx, "%s is an IPv6 address\n", string);
-               return 0;
-       }
-
-       // Try parsing this as an IPv4 address
-       struct in_addr ipv4_address;
-       r = inet_pton(AF_INET, string, &ipv4_address);
-       if (r == 1) {
-               DEBUG(ctx, "%s is an IPv4 address\n", string);
-
-               // Convert to IPv6-mapped address
-               address->s6_addr32[0] = htonl(0x0000);
-               address->s6_addr32[1] = htonl(0x0000);
-               address->s6_addr32[2] = htonl(0xffff);
-               address->s6_addr32[3] = ipv4_address.s_addr;
-
-               return 0;
-       }
-
-       DEBUG(ctx, "%s is not an valid IP address\n", string);
-       return -EINVAL;
-}
index ee333f18f86c41cbb7b00693b6fa81c5ee9768d4..50734b3b26bc6dc12b5f4cf7acbff0a490593b18 100644 (file)
@@ -1,23 +1,3 @@
-LIBLOC_PRIVATE {
-global:
-       # Network Tree
-       loc_network_tree_add_network;
-       loc_network_tree_count_networks;
-       loc_network_tree_count_nodes;
-       loc_network_tree_dump;
-       loc_network_tree_new;
-       loc_network_tree_unref;
-
-       # String Pool
-       loc_stringpool_add;
-       loc_stringpool_dump;
-       loc_stringpool_get;
-       loc_stringpool_get_size;
-       loc_stringpool_new;
-       loc_stringpool_ref;
-       loc_stringpool_unref;
-};
-
 LIBLOC_1 {
 global:
        loc_ref;
@@ -47,6 +27,7 @@ global:
        loc_as_list_new;
        loc_as_list_ref;
        loc_as_list_size;
+       loc_as_list_sort;
        loc_as_list_unref;
 
        # Country
@@ -59,6 +40,7 @@ global:
        loc_country_ref;
        loc_country_set_continent_code;
        loc_country_set_name;
+       loc_country_special_code_to_flag;
        loc_country_unref;
 
        # Country List
@@ -71,6 +53,7 @@ global:
        loc_country_list_new;
        loc_country_list_ref;
        loc_country_list_size;
+       loc_country_list_sort;
        loc_country_list_unref;
 
        # Database
@@ -117,10 +100,8 @@ global:
        loc_network_get_last_address;
        loc_network_has_flag;
        loc_network_is_subnet;
-       loc_network_match_address;
-       loc_network_match_asn;
-       loc_network_match_country_code;
-       loc_network_match_flag;
+       loc_network_matches_address;
+       loc_network_matches_country_code;
        loc_network_new;
        loc_network_new_from_string;
        loc_network_overlaps;
@@ -165,3 +146,10 @@ global:
 local:
        *;
 };
+
+LIBLOC_2 {
+global:
+       loc_network_reverse_pointer;
+local:
+       *;
+} LIBLOC_1;
diff --git a/src/libloc/address.h b/src/libloc/address.h
new file mode 100644 (file)
index 0000000..1c14696
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2022 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LIBLOC_ADDRESS_H
+#define LIBLOC_ADDRESS_H
+
+#ifdef LIBLOC_PRIVATE
+
+#include <errno.h>
+#include <netinet/in.h>
+
+#include <libloc/compat.h>
+
+/*
+       All of these functions are private and for internal use only
+*/
+
+const char* loc_address_str(const struct in6_addr* address);
+int loc_address_parse(struct in6_addr* address, unsigned int* prefix, const char* string);
+
+static inline int loc_address_family(const struct in6_addr* address) {
+       if (IN6_IS_ADDR_V4MAPPED(address))
+               return AF_INET;
+       else
+               return AF_INET6;
+}
+
+static inline unsigned int loc_address_family_bit_length(const int family) {
+       switch (family) {
+               case AF_INET6:
+                       return 128;
+
+               case AF_INET:
+                       return 32;
+
+               default:
+                       return 0;
+       }
+}
+
+/*
+       Checks whether prefix is valid for the given address
+*/
+static inline int loc_address_valid_prefix(const struct in6_addr* address, unsigned int prefix) {
+       const int family = loc_address_family(address);
+
+       // What is the largest possible prefix?
+       const unsigned int bit_length = loc_address_family_bit_length(family);
+
+       if (prefix <= bit_length)
+               return 1;
+
+       return 0;
+}
+
+static inline int loc_address_cmp(const struct in6_addr* a1, const struct in6_addr* a2) {
+       for (unsigned int i = 0; i < 16; i++) {
+               if (a1->s6_addr[i] > a2->s6_addr[i])
+                       return 1;
+
+               else if (a1->s6_addr[i] < a2->s6_addr[i])
+                       return -1;
+       }
+
+       return 0;
+}
+
+#define foreach_octet_in_address(octet, address) \
+       for (octet = (IN6_IS_ADDR_V4MAPPED(address) ? 12 : 0); octet <= 15; octet++)
+
+#define foreach_octet_in_address_reverse(octet, address) \
+       for (octet = 15; octet >= (IN6_IS_ADDR_V4MAPPED(address) ? 12 : 0); octet--)
+
+static inline int loc_address_all_zeroes(const struct in6_addr* address) {
+       int octet = 0;
+
+       foreach_octet_in_address(octet, address) {
+               if (address->s6_addr[octet])
+                       return 0;
+       }
+
+       return 1;
+}
+
+static inline int loc_address_all_ones(const struct in6_addr* address) {
+       int octet = 0;
+
+       foreach_octet_in_address(octet, address) {
+               if (address->s6_addr[octet] < 255)
+                       return 0;
+       }
+
+       return 1;
+}
+
+static inline int loc_address_get_bit(const struct in6_addr* address, unsigned int i) {
+       return ((address->s6_addr[i / 8] >> (7 - (i % 8))) & 1);
+}
+
+static inline void loc_address_set_bit(struct in6_addr* address, unsigned int i, unsigned int val) {
+       address->s6_addr[i / 8] ^= (-val ^ address->s6_addr[i / 8]) & (1 << (7 - (i % 8)));
+}
+
+static inline struct in6_addr loc_prefix_to_bitmask(const unsigned int prefix) {
+       struct in6_addr bitmask;
+
+       for (unsigned int i = 0; i < 16; i++)
+               bitmask.s6_addr[i] = 0;
+
+       for (int i = prefix, j = 0; i > 0; i -= 8, j++) {
+               if (i >= 8)
+                       bitmask.s6_addr[j] = 0xff;
+               else
+                       bitmask.s6_addr[j] = 0xff << (8 - i);
+       }
+
+       return bitmask;
+}
+
+static inline unsigned int loc_address_bit_length(const struct in6_addr* address) {
+       unsigned int bitlength = 0;
+
+       int octet = 0;
+
+       // Initialize the bit length
+       if (IN6_IS_ADDR_V4MAPPED(address))
+               bitlength = 32;
+       else
+               bitlength = 128;
+
+       // Walk backwards until we find the first one
+       foreach_octet_in_address_reverse(octet, address) {
+               // Count all trailing zeroes
+               int trailing_zeroes = __builtin_ctz(address->s6_addr[octet]);
+
+               // We only have one byte
+               if (trailing_zeroes > 8)
+                       trailing_zeroes = 8;
+
+               // Remove any trailing zeroes from the total length
+               bitlength -= trailing_zeroes;
+
+               if (trailing_zeroes < 8)
+                       return bitlength;
+       }
+
+       return 0;
+}
+
+static inline int loc_address_reset(struct in6_addr* address, int family) {
+       switch (family) {
+               case AF_INET6:
+                       address->s6_addr32[0] = 0x00000000;
+                       address->s6_addr32[1] = 0x00000000;
+                       address->s6_addr32[2] = 0x00000000;
+                       address->s6_addr32[3] = 0x00000000;
+                       return 0;
+
+               case AF_INET:
+                       address->s6_addr32[0] = 0x00000000;
+                       address->s6_addr32[1] = 0x00000000;
+                       address->s6_addr32[2] = htonl(0xffff);
+                       address->s6_addr32[3] = 0x00000000;
+                       return 0;
+       }
+
+       return -1;
+}
+
+static inline int loc_address_reset_last(struct in6_addr* address, int family) {
+       switch (family) {
+               case AF_INET6:
+                       address->s6_addr32[0] = 0xffffffff;
+                       address->s6_addr32[1] = 0xffffffff;
+                       address->s6_addr32[2] = 0xffffffff;
+                       address->s6_addr32[3] = 0xffffffff;
+                       return 0;
+
+               case AF_INET:
+                       address->s6_addr32[0] = 0x00000000;
+                       address->s6_addr32[1] = 0x00000000;
+                       address->s6_addr32[2] = htonl(0xffff);
+                       address->s6_addr32[3] = 0xffffffff;
+                       return 0;
+       }
+
+       return -1;
+}
+
+static inline struct in6_addr loc_address_and(
+               const struct in6_addr* address, const struct in6_addr* bitmask) {
+       struct in6_addr a;
+
+       // Perform bitwise AND
+       for (unsigned int i = 0; i < 4; i++)
+               a.s6_addr32[i] = address->s6_addr32[i] & bitmask->s6_addr32[i];
+
+       return a;
+}
+
+static inline struct in6_addr loc_address_or(
+               const struct in6_addr* address, const struct in6_addr* bitmask) {
+       struct in6_addr a;
+
+       // Perform bitwise OR
+       for (unsigned int i = 0; i < 4; i++)
+               a.s6_addr32[i] = address->s6_addr32[i] | ~bitmask->s6_addr32[i];
+
+       return a;
+}
+
+static inline int loc_address_sub(struct in6_addr* result,
+               const struct in6_addr* address1, const struct in6_addr* address2) {
+       int family1 = loc_address_family(address1);
+       int family2 = loc_address_family(address2);
+
+       // Address family must match
+       if (family1 != family2) {
+               errno = EINVAL;
+               return 1;
+       }
+
+       // Clear result
+       int r = loc_address_reset(result, family1);
+       if (r)
+               return r;
+
+       int octet = 0;
+       int remainder = 0;
+
+       foreach_octet_in_address_reverse(octet, address1) {
+               int x = address1->s6_addr[octet] - address2->s6_addr[octet] + remainder;
+
+               // Store remainder for the next iteration
+               remainder = (x >> 8);
+
+               result->s6_addr[octet] = x & 0xff;
+       }
+
+       return 0;
+}
+
+static inline void loc_address_increment(struct in6_addr* address) {
+       // Prevent overflow when everything is ones
+       if (loc_address_all_ones(address))
+               return;
+
+       int octet = 0;
+       foreach_octet_in_address_reverse(octet, address) {
+               if (address->s6_addr[octet] < 255) {
+                       address->s6_addr[octet]++;
+                       break;
+               } else {
+                       address->s6_addr[octet] = 0;
+               }
+       }
+}
+
+static inline void loc_address_decrement(struct in6_addr* address) {
+       // Prevent underflow when everything is zeroes
+       if (loc_address_all_zeroes(address))
+               return;
+
+       int octet = 0;
+       foreach_octet_in_address_reverse(octet, address) {
+               if (address->s6_addr[octet] > 0) {
+                       address->s6_addr[octet]--;
+                       break;
+               } else {
+                       address->s6_addr[octet] = 255;
+               }
+       }
+}
+
+static inline int loc_address_count_trailing_zero_bits(const struct in6_addr* address) {
+       int zeroes = 0;
+
+       int octet = 0;
+       foreach_octet_in_address_reverse(octet, address) {
+               if (address->s6_addr[octet]) {
+                       zeroes += __builtin_ctz(address->s6_addr[octet]);
+                       break;
+               } else
+                       zeroes += 8;
+       }
+
+       return zeroes;
+}
+
+static inline int loc_address_get_octet(const struct in6_addr* address, const unsigned int i) {
+       if (IN6_IS_ADDR_V4MAPPED(address)) {
+               if (i >= 4)
+                       return -ERANGE;
+
+               return (address->s6_addr32[3] >> (i * 8)) & 0xff;
+
+       } else {
+               if (i >= 32)
+                       return -ERANGE;
+
+               return address->s6_addr[i];
+       }
+}
+
+static inline int loc_address_get_nibble(const struct in6_addr* address, const unsigned int i) {
+       int octet = 0;
+
+       // Fetch the octet
+       octet = loc_address_get_octet(address, i / 2);
+       if (octet < 0)
+               return octet;
+
+       // An even nibble index selects the high nibble, so shift it down
+       if (i % 2 == 0)
+               octet >>= 4;
+
+       // Return the nibble
+       return octet & 0x0f;
+}
+
+#endif /* LIBLOC_PRIVATE */
+
+#endif /* LIBLOC_ADDRESS_H */
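
address.h is private (everything is guarded by LIBLOC_PRIVATE), so these helpers are only reachable from inside the library, but their arithmetic is easy to check in isolation. The sketch below re-implements two of them, loc_prefix_to_bitmask() and the carry loop of loc_address_increment(), on concrete values rather than including the private header:

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    /* Same mask-building loop as loc_prefix_to_bitmask() above */
    static struct in6_addr prefix_to_bitmask(unsigned int prefix) {
            struct in6_addr bitmask;
            memset(&bitmask, 0, sizeof(bitmask));

            for (int i = prefix, j = 0; i > 0; i -= 8, j++)
                    bitmask.s6_addr[j] = (i >= 8) ? 0xff : 0xff << (8 - i);

            return bitmask;
    }

    int main(void) {
            /* A /20 sets the first 20 bits: ff ff f0 followed by zeroes */
            struct in6_addr mask = prefix_to_bitmask(20);
            printf("%02x %02x %02x\n", mask.s6_addr[0], mask.s6_addr[1], mask.s6_addr[2]);

            /* Incrementing the IPv4-mapped address 192.0.2.255 carries into the
               next octet, exactly like the loop in loc_address_increment() */
            struct in6_addr address;
            if (inet_pton(AF_INET6, "::ffff:192.0.2.255", &address) != 1)
                    return 1;

            for (int octet = 15; octet >= 12; octet--) {
                    if (address.s6_addr[octet] < 255) {
                            address.s6_addr[octet]++;
                            break;
                    }
                    address.s6_addr[octet] = 0;
            }

            char buffer[INET6_ADDRSTRLEN];
            if (inet_ntop(AF_INET6, &address, buffer, sizeof(buffer)))
                    printf("%s\n", buffer); /* ::ffff:192.0.3.0 */

            return 0;
    }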
similarity index 93%
rename from src/loc/as-list.h
rename to src/libloc/as-list.h
index 7b5c4e859d915621e73ee6fa1d7f92036fb39f22..bd1d4e61c93a4d697517882f3079e63a5492550b 100644 (file)
@@ -17,8 +17,8 @@
 #ifndef LIBLOC_AS_LIST_H
 #define LIBLOC_AS_LIST_H
 
-#include <loc/as.h>
-#include <loc/libloc.h>
+#include <libloc/as.h>
+#include <libloc/libloc.h>
 
 struct loc_as_list;
 
@@ -38,4 +38,6 @@ int loc_as_list_contains(
 int loc_as_list_contains_number(
        struct loc_as_list* list, uint32_t number);
 
+void loc_as_list_sort(struct loc_as_list* list);
+
 #endif
similarity index 94%
rename from src/loc/as.h
rename to src/libloc/as.h
index b4c8e1d66fc509051b4541bb2d10a8c642c70b97..05e0188803267ba4bc342df3cfc907f2dd0754dd 100644 (file)
@@ -19,9 +19,9 @@
 
 #include <stdint.h>
 
-#include <loc/libloc.h>
-#include <loc/format.h>
-#include <loc/stringpool.h>
+#include <libloc/libloc.h>
+#include <libloc/format.h>
+#include <libloc/stringpool.h>
 
 struct loc_as;
 int loc_as_new(struct loc_ctx* ctx, struct loc_as** as, uint32_t number);
similarity index 92%
rename from src/loc/compat.h
rename to src/libloc/compat.h
index 87509766265dc1f2e3bb239811942309e6eb51a3..aecfeb41cfe4a978d31c33c23a2d705b1ad52230 100644 (file)
 #  define s6_addr32 __u6_addr.__u6_addr32
 #endif
 
+#ifndef reallocarray
+#  define reallocarray(ptr, nmemb, size) realloc(ptr, nmemb * size)
+#endif
+
 #endif
 
 #endif
similarity index 93%
rename from src/loc/country-list.h
rename to src/libloc/country-list.h
index a7f818a52f29597bd1f36044e11608aaa4a9757c..a479aeda9f244de4e425ff9e9eb9bebd19e76e6d 100644 (file)
@@ -19,8 +19,8 @@
 
 #include <stdlib.h>
 
-#include <loc/libloc.h>
-#include <loc/country.h>
+#include <libloc/libloc.h>
+#include <libloc/country.h>
 
 struct loc_country_list;
 
@@ -40,4 +40,6 @@ int loc_country_list_contains(
 int loc_country_list_contains_code(
        struct loc_country_list* list, const char* code);
 
+void loc_country_list_sort(struct loc_country_list* list);
+
 #endif
similarity index 88%
rename from src/loc/country.h
rename to src/libloc/country.h
index d09daee9381cdac6d12f7e93f86a97297fcea95f..76724ce855dd568200d57bcb41cd630040758b69 100644 (file)
@@ -17,9 +17,9 @@
 #ifndef LIBLOC_COUNTRY_H
 #define LIBLOC_COUNTRY_H
 
-#include <loc/libloc.h>
-#include <loc/format.h>
-#include <loc/stringpool.h>
+#include <libloc/libloc.h>
+#include <libloc/format.h>
+#include <libloc/stringpool.h>
 
 struct loc_country;
 int loc_country_new(struct loc_ctx* ctx, struct loc_country** country, const char* country_code);
@@ -37,6 +37,7 @@ int loc_country_set_name(struct loc_country* country, const char* name);
 int loc_country_cmp(struct loc_country* country1, struct loc_country* country2);
 
 int loc_country_code_is_valid(const char* cc);
+int loc_country_special_code_to_flag(const char* cc);
 
 #ifdef LIBLOC_PRIVATE
 
@@ -53,6 +54,10 @@ static inline void loc_country_code_copy(char* dst, const char* src) {
     }
 }
 
+static inline int loc_country_code_cmp(const char* cc1, const char* cc2) {
+       return memcmp(cc1, cc2, 2);
+}
+
 #endif
 
 #endif
similarity index 93%
rename from src/loc/database.h
rename to src/libloc/database.h
index 70801f0d66db5a34515e1fa8b5f42c1951b8013b..220f0fb1a0a2c9a28a7371a965cefe10d109de60 100644 (file)
 #include <stdio.h>
 #include <stdint.h>
 
-#include <loc/libloc.h>
-#include <loc/network.h>
-#include <loc/as.h>
-#include <loc/country.h>
-#include <loc/country-list.h>
+#include <libloc/libloc.h>
+#include <libloc/network.h>
+#include <libloc/as.h>
+#include <libloc/country.h>
+#include <libloc/country-list.h>
 
 struct loc_database;
 int loc_database_new(struct loc_ctx* ctx, struct loc_database** database, FILE* f);
@@ -43,7 +43,7 @@ int loc_database_get_as(struct loc_database* db, struct loc_as** as, uint32_t nu
 size_t loc_database_count_as(struct loc_database* db);
 
 int loc_database_lookup(struct loc_database* db,
-               struct in6_addr* address, struct loc_network** network);
+               const struct in6_addr* address, struct loc_network** network);
 int loc_database_lookup_from_string(struct loc_database* db,
                const char* string, struct loc_network** network);
 
@@ -54,6 +54,7 @@ enum loc_database_enumerator_mode {
        LOC_DB_ENUMERATE_NETWORKS  = 1,
        LOC_DB_ENUMERATE_ASES      = 2,
        LOC_DB_ENUMERATE_COUNTRIES = 3,
+       LOC_DB_ENUMERATE_BOGONS    = 4,
 };
 
 enum loc_database_enumerator_flags {
similarity index 96%
rename from src/loc/format.h
rename to src/libloc/format.h
index a04c089491772bee44c8043f641f110059934ff7..030394bc72e4003f7d1def2f8d50176cab4201b4 100644 (file)
@@ -20,6 +20,7 @@
 #include <stdint.h>
 
 #define LOC_DATABASE_MAGIC      "LOCDBXX"
+#define LOC_DATABASE_MAGIC_SIZE sizeof(struct loc_database_magic)
 
 enum loc_database_version {
        LOC_DATABASE_VERSION_UNSET = 0,
@@ -33,7 +34,7 @@ enum loc_database_version {
 #define LOC_DATABASE_DOMAIN "_v%u._db.location.ipfire.org"
 
 #define LOC_DATABASE_PAGE_SIZE         4096
-#define LOC_SIGNATURE_MAX_LENGTH       (LOC_DATABASE_PAGE_SIZE / 2)
+#define LOC_SIGNATURE_MAX_LENGTH       2048
 
 struct loc_database_magic {
        char magic[7];
similarity index 91%
rename from src/loc/libloc.h
rename to src/libloc/libloc.h
index 4854a418b1e554604824f7f323a8b44cdbc0bcd6..938ed75e0f1d77e4d28fd29e4c0e75f8e0c6fa2e 100644 (file)
@@ -36,10 +36,6 @@ void loc_set_log_fn(struct loc_ctx* ctx,
 int loc_get_log_priority(struct loc_ctx* ctx);
 void loc_set_log_priority(struct loc_ctx* ctx, int priority);
 
-#ifdef LIBLOC_PRIVATE
-int loc_parse_address(struct loc_ctx* ctx, const char* string, struct in6_addr* address);
-#endif
-
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
similarity index 79%
rename from src/loc/network-list.h
rename to src/libloc/network-list.h
index bee21c4bdbbc5b6bbd7d42f1540787335d06684f..7d3c791bdee67e767a1dda3bc92b6d5b6c94310c 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef LIBLOC_NETWORK_LIST_H
 #define LIBLOC_NETWORK_LIST_H
 
-#include <loc/network.h>
+#include <libloc/network.h>
 
 struct loc_network_list;
 int loc_network_list_new(struct loc_ctx* ctx, struct loc_network_list** list);
@@ -31,7 +31,20 @@ struct loc_network* loc_network_list_get(struct loc_network_list* list, size_t i
 int loc_network_list_push(struct loc_network_list* list, struct loc_network* network);
 struct loc_network* loc_network_list_pop(struct loc_network_list* list);
 struct loc_network* loc_network_list_pop_first(struct loc_network_list* list);
+int loc_network_list_remove(struct loc_network_list* list, struct loc_network* network);
 int loc_network_list_contains(struct loc_network_list* list, struct loc_network* network);
 int loc_network_list_merge(struct loc_network_list* self, struct loc_network_list* other);
 
+void loc_network_list_remove_with_prefix_smaller_than(
+       struct loc_network_list* list, const unsigned int prefix);
+
+#ifdef LIBLOC_PRIVATE
+
+#include <netinet/in.h>
+
+int loc_network_list_summarize(struct loc_ctx* ctx,
+       const struct in6_addr* first, const struct in6_addr* last, struct loc_network_list** list);
+
+#endif /* LIBLOC_PRIVATE */
+
 #endif
diff --git a/src/libloc/network-tree.h b/src/libloc/network-tree.h
new file mode 100644 (file)
index 0000000..13052b7
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2017-2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LIBLOC_NETWORK_TREE_H
+#define LIBLOC_NETWORK_TREE_H
+
+#ifdef LIBLOC_PRIVATE
+
+#include <libloc/libloc.h>
+#include <libloc/network.h>
+
+struct loc_network_tree;
+
+int loc_network_tree_new(struct loc_ctx* ctx, struct loc_network_tree** tree);
+
+struct loc_network_tree* loc_network_tree_unref(struct loc_network_tree* tree);
+
+struct loc_network_tree_node* loc_network_tree_get_root(struct loc_network_tree* tree);
+
+int loc_network_tree_walk(struct loc_network_tree* tree,
+               int(*filter_callback)(struct loc_network* network, void* data),
+               int(*callback)(struct loc_network* network, void* data), void* data);
+
+int loc_network_tree_dump(struct loc_network_tree* tree);
+
+int loc_network_tree_add_network(struct loc_network_tree* tree, struct loc_network* network);
+
+size_t loc_network_tree_count_nodes(struct loc_network_tree* tree);
+
+int loc_network_tree_cleanup(struct loc_network_tree* tree);
+
+/*
+       Nodes
+*/
+
+struct loc_network_tree_node;
+
+int loc_network_tree_node_new(struct loc_ctx* ctx, struct loc_network_tree_node** node);
+
+struct loc_network_tree_node* loc_network_tree_node_ref(struct loc_network_tree_node* node);
+struct loc_network_tree_node* loc_network_tree_node_unref(struct loc_network_tree_node* node);
+
+struct loc_network_tree_node* loc_network_tree_node_get(
+       struct loc_network_tree_node* node, unsigned int index);
+
+int loc_network_tree_node_is_leaf(struct loc_network_tree_node* node);
+
+struct loc_network* loc_network_tree_node_get_network(struct loc_network_tree_node* node);
+
+#endif /* LIBLOC_PRIVATE */
+
+#endif /* LIBLOC_NETWORK_TREE_H */
similarity index 60%
rename from src/loc/network.h
rename to src/libloc/network.h
index a30f65347d4c7143550667c75c02b30c66f79374..6f2dad2285b37a008646ec3ce8fd2e78a0526d23 100644 (file)
@@ -19,9 +19,9 @@
 
 #include <netinet/in.h>
 
-#include <loc/libloc.h>
-#include <loc/format.h>
-#include <loc/network-list.h>
+#include <libloc/libloc.h>
+#include <libloc/format.h>
+#include <libloc/network-list.h>
 
 enum loc_network_flags {
        LOC_NETWORK_FLAG_ANONYMOUS_PROXY    = (1 << 0), // A1
@@ -37,27 +37,25 @@ int loc_network_new_from_string(struct loc_ctx* ctx, struct loc_network** networ
                const char* address_string);
 struct loc_network* loc_network_ref(struct loc_network* network);
 struct loc_network* loc_network_unref(struct loc_network* network);
-char* loc_network_str(struct loc_network* network);
+const char* loc_network_str(struct loc_network* network);
 int loc_network_address_family(struct loc_network* network);
 unsigned int loc_network_prefix(struct loc_network* network);
 
 const struct in6_addr* loc_network_get_first_address(struct loc_network* network);
-char* loc_network_format_first_address(struct loc_network* network);
+const char* loc_network_format_first_address(struct loc_network* network);
 const struct in6_addr* loc_network_get_last_address(struct loc_network* network);
-char* loc_network_format_last_address(struct loc_network* network);
-int loc_network_match_address(struct loc_network* network, const struct in6_addr* address);
+const char* loc_network_format_last_address(struct loc_network* network);
+int loc_network_matches_address(struct loc_network* network, const struct in6_addr* address);
 
 const char* loc_network_get_country_code(struct loc_network* network);
 int loc_network_set_country_code(struct loc_network* network, const char* country_code);
-int loc_network_match_country_code(struct loc_network* network, const char* country_code);
+int loc_network_matches_country_code(struct loc_network* network, const char* country_code);
 
 uint32_t loc_network_get_asn(struct loc_network* network);
 int loc_network_set_asn(struct loc_network* network, uint32_t asn);
-int loc_network_match_asn(struct loc_network* network, uint32_t asn);
 
 int loc_network_has_flag(struct loc_network* network, uint32_t flag);
 int loc_network_set_flag(struct loc_network* network, uint32_t flag);
-int loc_network_match_flag(struct loc_network* network, uint32_t flag);
 
 int loc_network_cmp(struct loc_network* self, struct loc_network* other);
 int loc_network_overlaps(struct loc_network* self, struct loc_network* other);
@@ -68,32 +66,18 @@ struct loc_network_list* loc_network_exclude(
 struct loc_network_list* loc_network_exclude_list(
                struct loc_network* network, struct loc_network_list* list);
 
+char* loc_network_reverse_pointer(struct loc_network* network, const char* suffix);
+
 #ifdef LIBLOC_PRIVATE
 
+int loc_network_properties_cmp(struct loc_network* self, struct loc_network* other);
+unsigned int loc_network_raw_prefix(struct loc_network* network);
+
 int loc_network_to_database_v1(struct loc_network* network, struct loc_database_network_v1* dbobj);
 int loc_network_new_from_database_v1(struct loc_ctx* ctx, struct loc_network** network,
                struct in6_addr* address, unsigned int prefix, const struct loc_database_network_v1* dbobj);
 
-struct loc_network_tree;
-int loc_network_tree_new(struct loc_ctx* ctx, struct loc_network_tree** tree);
-struct loc_network_tree* loc_network_tree_unref(struct loc_network_tree* tree);
-struct loc_network_tree_node* loc_network_tree_get_root(struct loc_network_tree* tree);
-int loc_network_tree_walk(struct loc_network_tree* tree,
-               int(*filter_callback)(struct loc_network* network, void* data),
-               int(*callback)(struct loc_network* network, void* data), void* data);
-int loc_network_tree_dump(struct loc_network_tree* tree);
-int loc_network_tree_add_network(struct loc_network_tree* tree, struct loc_network* network);
-size_t loc_network_tree_count_networks(struct loc_network_tree* tree);
-size_t loc_network_tree_count_nodes(struct loc_network_tree* tree);
-
-struct loc_network_tree_node;
-int loc_network_tree_node_new(struct loc_ctx* ctx, struct loc_network_tree_node** node);
-struct loc_network_tree_node* loc_network_tree_node_ref(struct loc_network_tree_node* node);
-struct loc_network_tree_node* loc_network_tree_node_unref(struct loc_network_tree_node* node);
-struct loc_network_tree_node* loc_network_tree_node_get(struct loc_network_tree_node* node, unsigned int index);
-
-int loc_network_tree_node_is_leaf(struct loc_network_tree_node* node);
-struct loc_network* loc_network_tree_node_get_network(struct loc_network_tree_node* node);
+int loc_network_merge(struct loc_network** n, struct loc_network* n1, struct loc_network* n2);
 
 #endif
 #endif
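
For reference: the public matching helpers above were renamed from loc_network_match_*() to loc_network_matches_*() (the ASN and flag variants were dropped from the header), and loc_network_str()/loc_network_format_*_address() now return a const char* that is owned by the network object. A minimal sketch of the renamed API, using only the declarations above plus loc_new()/loc_unref() from <libloc/libloc.h>; this is not part of the patch and trims error handling:

    /* Minimal sketch of the renamed network API; error handling trimmed. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>

    #include <libloc/libloc.h>
    #include <libloc/network.h>

    int main(void) {
            struct loc_ctx* ctx = NULL;
            struct loc_network* network = NULL;
            struct in6_addr address;

            if (loc_new(&ctx))
                    return 1;

            if (loc_network_new_from_string(ctx, &network, "2001:db8::/32"))
                    return 1;

            // The formatted string is now owned by the network object - do not free() it
            printf("network = %s\n", loc_network_str(network));

            // loc_network_match_address() became loc_network_matches_address()
            inet_pton(AF_INET6, "2001:db8::1", &address);
            printf("matches = %d\n", loc_network_matches_address(network, &address));

            loc_network_unref(network);
            loc_unref(ctx);
            return 0;
    }
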
similarity index 81%
rename from src/loc/private.h
rename to src/libloc/private.h
index 6b98d1c4d2f53c39868ecfcdcca89849dc863277..89ce5bc2853d8a435a43a63bd7b6f952650f4cf0 100644 (file)
 
 #ifdef LIBLOC_PRIVATE
 
-#include <netinet/in.h>
-#include <stdbool.h>
 #include <stdio.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
+#include <libloc/libloc.h>
 
 static inline void __attribute__((always_inline, format(printf, 2, 3)))
 loc_log_null(struct loc_ctx *ctx, const char *format, ...) {}
@@ -58,25 +56,6 @@ void loc_log(struct loc_ctx *ctx,
        int priority, const char *file, int line, const char *fn,
        const char *format, ...) __attribute__((format(printf, 6, 7)));
 
-static inline int in6_addr_cmp(const struct in6_addr* a1, const struct in6_addr* a2) {
-       for (unsigned int i = 0; i < 16; i++) {
-               if (a1->s6_addr[i] > a2->s6_addr[i])
-                       return 1;
-
-               else if (a1->s6_addr[i] < a2->s6_addr[i])
-                       return -1;
-       }
-
-       return 0;
-}
-
-static inline int in6_addr_get_bit(const struct in6_addr* address, unsigned int i) {
-       return ((address->s6_addr[i / 8] >> (7 - (i % 8))) & 1);
-}
-
-static inline void in6_addr_set_bit(struct in6_addr* address, unsigned int i, unsigned int val) {
-       address->s6_addr[i / 8] ^= (-val ^ address->s6_addr[i / 8]) & (1 << (7 - (i % 8)));
-}
 
 static inline void hexdump(struct loc_ctx* ctx, const void* addr, size_t len) {
        char buffer_hex[16 * 3 + 6];
@@ -87,6 +66,9 @@ static inline void hexdump(struct loc_ctx* ctx, const void* addr, size_t len) {
 
        DEBUG(ctx, "Dumping %zu byte(s)\n", len);
 
+       if (!len)
+               return;
+
        // Process every byte in the data
        for (i = 0; i < len; i++) {
                // Multiple of 16 means new line (with line offset)
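
The in6_addr helpers that used to live here (in6_addr_cmp(), in6_addr_get_bit(), in6_addr_set_bit()) are gone; the rest of this commit uses loc_address_cmp()/loc_address_get_bit(), presumably declared in the new <libloc/address.h>. A self-contained illustration of the bit addressing both variants use, where bit 0 is the most significant bit of s6_addr[0]; only standard headers, nothing from the patch:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>

    // Same bit addressing as the removed in6_addr_get_bit(): bit i sits in
    // byte i / 8 and is counted from the most significant bit downwards.
    static int get_bit(const struct in6_addr* address, unsigned int i) {
            return (address->s6_addr[i / 8] >> (7 - (i % 8))) & 1;
    }

    int main(void) {
            struct in6_addr address;
            inet_pton(AF_INET6, "2001:db8::", &address);

            // Print the 32 leading bits - the part a /32 contributes to the prefix tree
            for (unsigned int i = 0; i < 32; i++)
                    putchar('0' + get_bit(&address, i));
            putchar('\n');

            return 0;
    }
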
similarity index 96%
rename from src/loc/resolv.h
rename to src/libloc/resolv.h
index 3b70c606f217b26ded7a0c03e3995a79fee7bd9d..dd13d60d8674ab04e9aa21b5321443d1eda5707f 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <time.h>
 
-#include <loc/libloc.h>
+#include <libloc/libloc.h>
 
 int loc_discover_latest_version(struct loc_ctx* ctx, unsigned int version, time_t* t);
 
similarity index 95%
rename from src/loc/stringpool.h
rename to src/libloc/stringpool.h
index 8068242c24b5bb78fe37f97307e53a7ed4023ad8..c6bd2161ff3e1a22283afe9ab1ea4f3c41a467bf 100644 (file)
 #include <stddef.h>
 #include <stdio.h>
 
-#include <loc/libloc.h>
+#include <libloc/libloc.h>
 
 struct loc_stringpool;
 int loc_stringpool_new(struct loc_ctx* ctx, struct loc_stringpool** pool);
 int loc_stringpool_open(struct loc_ctx* ctx, struct loc_stringpool** pool,
-       FILE* f, size_t length, off_t offset);
+       const char* data, const size_t length);
 
 struct loc_stringpool* loc_stringpool_ref(struct loc_stringpool* pool);
 struct loc_stringpool* loc_stringpool_unref(struct loc_stringpool* pool);
similarity index 92%
rename from src/loc/writer.h
rename to src/libloc/writer.h
index f106a948b0c0a16c13464d14f79e8dd47853244a..eae9548ab74cd1186f2f93d26f71d03a50e7480b 100644 (file)
 
 #include <stdio.h>
 
-#include <loc/libloc.h>
-#include <loc/as.h>
-#include <loc/country.h>
-#include <loc/database.h>
-#include <loc/network.h>
+#include <libloc/libloc.h>
+#include <libloc/as.h>
+#include <libloc/country.h>
+#include <libloc/database.h>
+#include <libloc/network.h>
 
 struct loc_writer;
 
diff --git a/src/lua/as.c b/src/lua/as.c
new file mode 100644 (file)
index 0000000..558fcbf
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/as.h>
+
+#include "location.h"
+#include "as.h"
+#include "compat.h"
+
+typedef struct as {
+       struct loc_as* as;
+} AS;
+
+static AS* luaL_checkas(lua_State* L, int i) {
+       void* userdata = luaL_checkudata(L, i, "location.AS");
+
+       // Throw an error if the argument doesn't match
+       luaL_argcheck(L, userdata, i, "AS expected");
+
+       return (AS*)userdata;
+}
+
+int create_as(lua_State* L, struct loc_as* as) {
+       // Allocate a new object
+       AS* self = (AS*)lua_newuserdata(L, sizeof(*self));
+
+       // Set metatable
+       luaL_setmetatable(L, "location.AS");
+
+       // Store the AS
+       self->as = loc_as_ref(as);
+
+       return 1;
+}
+
+static int AS_new(lua_State* L) {
+       struct loc_as* as = NULL;
+       unsigned int n = 0;
+       int r;
+
+       // Fetch the number
+       n = luaL_checknumber(L, 1);
+
+       // Create the AS
+       r = loc_as_new(ctx, &as, n);
+       if (r)
+               return luaL_error(L, "Could not create AS %d: %s\n", n, strerror(errno));
+
+       // Return the AS
+       r = create_as(L, as);
+       loc_as_unref(as);
+
+       return r;
+}
+
+static int AS_gc(lua_State* L) {
+       AS* self = luaL_checkas(L, 1);
+
+       // Free AS
+       if (self->as) {
+               loc_as_unref(self->as);
+               self->as = NULL;
+       }
+
+       return 0;
+}
+
+static int AS_tostring(lua_State* L) {
+       AS* self = luaL_checkas(L, 1);
+
+       uint32_t number = loc_as_get_number(self->as);
+       const char* name = loc_as_get_name(self->as);
+
+       // Return string
+       if (name)
+               lua_pushfstring(L, "AS%d - %s", number, name);
+       else
+               lua_pushfstring(L, "AS%d", number);
+
+       return 1;
+}
+
+// Name
+
+static int AS_get_name(lua_State* L) {
+       AS* self = luaL_checkas(L, 1);
+
+       // Return the name
+       lua_pushstring(L, loc_as_get_name(self->as));
+
+       return 1;
+}
+
+// Number
+
+static int AS_get_number(lua_State* L) {
+       AS* self = luaL_checkas(L, 1);
+
+       // Return the number
+       lua_pushnumber(L, loc_as_get_number(self->as));
+
+       return 1;
+}
+
+static const struct luaL_Reg AS_functions[] = {
+       { "new", AS_new },
+       { "get_name", AS_get_name },
+       { "get_number", AS_get_number },
+       { "__gc", AS_gc },
+       { "__tostring", AS_tostring },
+       { NULL, NULL },
+};
+
+int register_as(lua_State* L) {
+       return register_class(L, "location.AS", AS_functions);
+}
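
The bindings are meant to be loaded from Lua; below is a hedged sketch of driving them from a C host instead, assuming the module code above is linked into the host so luaopen_location() can be handed to luaL_requiref() (Lua >= 5.2). Nothing in it is part of the patch; a plain Lua script would simply require('location').

    #include <stdio.h>

    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>

    #include "location.h"   // declares luaopen_location()

    int main(void) {
            lua_State* L = luaL_newstate();
            if (!L)
                    return 1;

            luaL_openlibs(L);

            // Register the module as "location"; require() finds it in package.loaded
            luaL_requiref(L, "location", luaopen_location, 0);
            lua_pop(L, 1);

            // Exercise the AS binding defined above
            if (luaL_dostring(L,
                            "local location = require('location')\n"
                            "local as = location.AS.new(65000)\n"
                            "print(tostring(as), as:get_number())\n")) {
                    fprintf(stderr, "%s\n", lua_tostring(L, -1));
                    lua_close(L);
                    return 1;
            }

            lua_close(L);
            return 0;
    }
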
diff --git a/src/lua/as.h b/src/lua/as.h
new file mode 100644 (file)
index 0000000..0ea34f9
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LUA_LOCATION_AS_H
+#define LUA_LOCATION_AS_H
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/as.h>
+
+int register_as(lua_State* L);
+
+int create_as(lua_State* L, struct loc_as* as);
+
+#endif /* LUA_LOCATION_AS_H */
diff --git a/src/lua/compat.h b/src/lua/compat.h
new file mode 100644 (file)
index 0000000..f0172b8
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LUA_LOCATION_COMPAT_H
+#define LUA_LOCATION_COMPAT_H
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#if LUA_VERSION_NUM < 502
+
+static inline void luaL_setmetatable(lua_State* L, const char* name) {
+       luaL_checkstack(L, 1, "not enough stack slots");
+       luaL_getmetatable(L, name);
+       lua_setmetatable(L, -2);
+}
+
+static inline void luaL_setfuncs(lua_State* L, const luaL_Reg* l, int nup) {
+       int i;
+
+       luaL_checkstack(L, nup+1, "too many upvalues");
+
+       for (; l->name != NULL; l++) {
+               lua_pushstring(L, l->name);
+
+               for (i = 0; i < nup; i++)
+                       lua_pushvalue(L, -(nup + 1));
+
+               lua_pushcclosure(L, l->func, nup);
+               lua_settable(L, -(nup + 3));
+       }
+
+       lua_pop(L, nup);
+}
+
+static inline void luaL_newlib(lua_State* L, const luaL_Reg* l) {
+       lua_newtable(L);
+       luaL_setfuncs(L, l, 0);
+}
+
+#endif /* Lua < 5.2 */
+
+#endif /* LUA_LOCATION_COMPAT_H */
diff --git a/src/lua/country.c b/src/lua/country.c
new file mode 100644 (file)
index 0000000..816bd2f
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/country.h>
+
+#include "location.h"
+#include "compat.h"
+#include "country.h"
+
+typedef struct country {
+       struct loc_country* country;
+} Country;
+
+static Country* luaL_checkcountry(lua_State* L, int i) {
+       void* userdata = luaL_checkudata(L, i, "location.Country");
+
+       // Throw an error if the argument doesn't match
+       luaL_argcheck(L, userdata, i, "Country expected");
+
+       return (Country*)userdata;
+}
+
+int create_country(lua_State* L, struct loc_country* country) {
+       // Allocate a new object
+       Country* self = (Country*)lua_newuserdata(L, sizeof(*self));
+
+       // Set metatable
+       luaL_setmetatable(L, "location.Country");
+
+       // Store country
+       self->country = loc_country_ref(country);
+
+       return 1;
+}
+
+static int Country_new(lua_State* L) {
+       struct loc_country* country = NULL;
+       const char* code = NULL;
+       int r;
+
+       // Fetch the code
+       code = luaL_checkstring(L, 1);
+
+       // Parse the string
+       r = loc_country_new(ctx, &country, code);
+       if (r)
+               return luaL_error(L, "Could not create country %s: %s\n", code, strerror(errno));
+
+       // Return the country
+       r = create_country(L, country);
+       loc_country_unref(country);
+
+       return r;
+}
+
+static int Country_gc(lua_State* L) {
+       Country* self = luaL_checkcountry(L, 1);
+
+       // Free country
+       if (self->country) {
+               loc_country_unref(self->country);
+               self->country = NULL;
+       }
+
+       return 0;
+}
+
+static int Country_eq(lua_State* L) {
+       Country* self  = luaL_checkcountry(L, 1);
+       Country* other = luaL_checkcountry(L, 2);
+
+       // Push comparison result
+       lua_pushboolean(L, loc_country_cmp(self->country, other->country) == 0);
+
+       return 1;
+}
+
+// Name
+
+static int Country_get_name(lua_State* L) {
+       Country* self = luaL_checkcountry(L, 1);
+
+       // Return the name
+       lua_pushstring(L, loc_country_get_name(self->country));
+
+       return 1;
+}
+
+// Code
+
+static int Country_get_code(lua_State* L) {
+       Country* self = luaL_checkcountry(L, 1);
+
+       // Return the code
+       lua_pushstring(L, loc_country_get_code(self->country));
+
+       return 1;
+}
+
+// Continent Code
+
+static int Country_get_continent_code(lua_State* L) {
+       Country* self = luaL_checkcountry(L, 1);
+
+       // Return the continent code
+       lua_pushstring(L, loc_country_get_continent_code(self->country));
+
+       return 1;
+}
+
+static const struct luaL_Reg Country_functions[] = {
+       { "new", Country_new },
+       { "get_code", Country_get_code },
+       { "get_continent_code", Country_get_continent_code },
+       { "get_name", Country_get_name },
+       { "__eq", Country_eq },
+       { "__gc", Country_gc },
+       { "__tostring", Country_get_code },
+       { NULL, NULL },
+};
+
+int register_country(lua_State* L) {
+       return register_class(L, "location.Country", Country_functions);
+}
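
Continuing the host sketch shown after as.c, a chunk that exercises the Country metamethods; the country code "DE" is only an illustrative value:

    #include <stdio.h>

    #include <lua.h>
    #include <lauxlib.h>

    // Runs inside the host set up in the sketch after as.c
    static void demo_country(lua_State* L) {
            if (luaL_dostring(L,
                            "local location = require('location')\n"
                            "local c1 = location.Country.new('DE')\n"
                            "local c2 = location.Country.new('DE')\n"
                            "print(tostring(c1))  -- __tostring returns the country code\n"
                            "print(c1 == c2)      -- __eq compares via loc_country_cmp()\n"))
                    fprintf(stderr, "%s\n", lua_tostring(L, -1));
    }
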
diff --git a/src/lua/country.h b/src/lua/country.h
new file mode 100644 (file)
index 0000000..4997d9d
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LUA_LOCATION_COUNTRY_H
+#define LUA_LOCATION_COUNTRY_H
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/country.h>
+
+int register_country(lua_State* L);
+
+int create_country(lua_State* L, struct loc_country* country);
+
+#endif /* LUA_LOCATION_COUNTRY_H */
diff --git a/src/lua/database.c b/src/lua/database.c
new file mode 100644 (file)
index 0000000..fcbbad0
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <errno.h>
+#include <string.h>
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/database.h>
+
+#include "location.h"
+#include "as.h"
+#include "compat.h"
+#include "country.h"
+#include "database.h"
+#include "network.h"
+
+typedef struct database {
+       struct loc_database* db;
+} Database;
+
+static Database* luaL_checkdatabase(lua_State* L, int i) {
+       void* userdata = luaL_checkudata(L, i, "location.Database");
+
+       // Throw an error if the argument doesn't match
+       luaL_argcheck(L, userdata, i, "Database expected");
+
+       return (Database*)userdata;
+}
+
+static int Database_open(lua_State* L) {
+       const char* path = NULL;
+       FILE* f = NULL;
+       int r;
+
+       // Fetch the path
+       path = luaL_checkstring(L, 1);
+
+       // Allocate a new object
+       Database* self = (Database*)lua_newuserdata(L, sizeof(*self));
+
+       // The userdata is not zeroed - clear the pointer so __gc stays safe
+       // if one of the error paths below fires
+       self->db = NULL;
+
+       // Set metatable
+       luaL_setmetatable(L, "location.Database");
+
+       // Open the database file
+       f = fopen(path, "r");
+       if (!f)
+               return luaL_error(L, "Could not open %s: %s\n", path, strerror(errno));
+
+       // Open the database
+       r = loc_database_new(ctx, &self->db, f);
+
+       // Close the file descriptor
+       fclose(f);
+
+       // Check for errors
+       if (r)
+               return luaL_error(L, "Could not open database %s: %s\n", path, strerror(errno));
+
+       return 1;
+}
+
+static int Database_gc(lua_State* L) {
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Free database
+       if (self->db) {
+               loc_database_unref(self->db);
+               self->db = NULL;
+       }
+
+       return 0;
+}
+
+// Created At
+
+static int Database_created_at(lua_State* L) {
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Fetch the time
+       time_t created_at = loc_database_created_at(self->db);
+
+       // Push the time onto the stack
+       lua_pushnumber(L, created_at);
+
+       return 1;
+}
+
+// Description
+
+static int Database_get_description(lua_State* L) {
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Push the description
+       lua_pushstring(L, loc_database_get_description(self->db));
+
+       return 1;
+}
+
+// License
+
+static int Database_get_license(lua_State* L) {
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Push the license
+       lua_pushstring(L, loc_database_get_license(self->db));
+
+       return 1;
+}
+
+static int Database_get_vendor(lua_State* L) {
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Push the vendor
+       lua_pushstring(L, loc_database_get_vendor(self->db));
+
+       return 1;
+}
+
+static int Database_get_as(lua_State* L) {
+       struct loc_as* as = NULL;
+       int r;
+
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Fetch number
+       uint32_t asn = luaL_checknumber(L, 2);
+
+       // Fetch the AS
+       r = loc_database_get_as(self->db, &as, asn);
+       if (r) {
+               lua_pushnil(L);
+               return 1;
+       }
+
+       // Create a new AS object
+       r = create_as(L, as);
+       loc_as_unref(as);
+
+       return r;
+}
+
+static int Database_get_country(lua_State* L) {
+       struct loc_country* country = NULL;
+       int r;
+
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Fetch code
+       const char* code = luaL_checkstring(L, 2);
+
+       // Fetch the country
+       r = loc_database_get_country(self->db, &country, code);
+       if (r) {
+               lua_pushnil(L);
+               return 1;
+       }
+
+       // Create a new country object
+       r = create_country(L, country);
+       loc_country_unref(country);
+
+       return r;
+}
+
+static int Database_lookup(lua_State* L) {
+       struct loc_network* network = NULL;
+       int r;
+
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Require a string
+       const char* address = luaL_checkstring(L, 2);
+
+       // Perform lookup
+       r = loc_database_lookup_from_string(self->db, address, &network);
+       if (r) {
+               switch (errno) {
+                       // Return nil if the network was not found
+                       case ENOENT:
+                               lua_pushnil(L);
+                               return 1;
+
+                       default:
+                               return luaL_error(L, "Could not lookup address %s: %s\n", address, strerror(errno));
+               }
+       }
+
+       // Create a network object
+       r = create_network(L, network);
+       loc_network_unref(network);
+
+       return r;
+}
+
+static int Database_verify(lua_State* L) {
+       FILE* f = NULL;
+       int r;
+
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Fetch path to key
+       const char* key = luaL_checkstring(L, 2);
+
+       // Open the keyfile
+       f = fopen(key, "r");
+       if (!f)
+               return luaL_error(L, "Could not open key %s: %s\n", key, strerror(errno));
+
+       // Verify!
+       r = loc_database_verify(self->db, f);
+       fclose(f);
+
+       // Push result onto the stack
+       lua_pushboolean(L, (r == 0));
+
+       return 1;
+}
+
+typedef struct enumerator {
+       struct loc_database_enumerator* e;
+} DatabaseEnumerator;
+
+static DatabaseEnumerator* luaL_checkdatabaseenumerator(lua_State* L, int i) {
+       void* userdata = luaL_checkudata(L, i, "location.DatabaseEnumerator");
+
+       // Throw an error if the argument doesn't match
+       luaL_argcheck(L, userdata, i, "DatabaseEnumerator expected");
+
+       return (DatabaseEnumerator*)userdata;
+}
+
+static int DatabaseEnumerator_gc(lua_State* L) {
+       DatabaseEnumerator* self = luaL_checkdatabaseenumerator(L, 1);
+
+       if (self->e) {
+               loc_database_enumerator_unref(self->e);
+               self->e = NULL;
+       }
+
+       return 0;
+}
+
+static int DatabaseEnumerator_next_network(lua_State* L) {
+       struct loc_network* network = NULL;
+       int r;
+
+       DatabaseEnumerator* self = luaL_checkdatabaseenumerator(L, lua_upvalueindex(1));
+
+       // Fetch the next network
+       r = loc_database_enumerator_next_network(self->e, &network);
+       if (r)
+               return luaL_error(L, "Could not fetch network: %s\n", strerror(errno));
+
+       // If we have received no network, we have reached the end
+       if (!network) {
+               lua_pushnil(L);
+               return 1;
+       }
+
+       // Create a network object
+       r = create_network(L, network);
+       loc_network_unref(network);
+
+       return r;
+}
+
+static int Database_list_networks(lua_State* L) {
+       DatabaseEnumerator* e = NULL;
+       int r;
+
+       Database* self = luaL_checkdatabase(L, 1);
+
+       // Allocate a new enumerator
+       e = lua_newuserdata(L, sizeof(*e));
+       e->e = NULL; // keep __gc safe if enumerator creation fails below
+       luaL_setmetatable(L, "location.DatabaseEnumerator");
+
+       // Create a new enumerator
+       r = loc_database_enumerator_new(&e->e, self->db, LOC_DB_ENUMERATE_NETWORKS, 0);
+       if (r)
+               return luaL_error(L, "Could not create enumerator: %s\n", strerror(errno));
+
+       // Push the closure onto the stack
+       lua_pushcclosure(L, DatabaseEnumerator_next_network, 1);
+
+       return 1;
+}
+
+static const struct luaL_Reg database_functions[] = {
+       { "created_at", Database_created_at },
+       { "get_as", Database_get_as },
+       { "get_description", Database_get_description },
+       { "get_country", Database_get_country },
+       { "get_license", Database_get_license },
+       { "get_vendor", Database_get_vendor },
+       { "open", Database_open },
+       { "lookup", Database_lookup },
+       { "list_networks", Database_list_networks },
+       { "verify", Database_verify },
+       { "__gc", Database_gc },
+       { NULL, NULL },
+};
+
+int register_database(lua_State* L) {
+       return register_class(L, "location.Database", database_functions);
+}
+
+static const struct luaL_Reg database_enumerator_functions[] = {
+       { "__gc", DatabaseEnumerator_gc },
+       { NULL, NULL },
+};
+
+int register_database_enumerator(lua_State* L) {
+       return register_class(L, "location.DatabaseEnumerator", database_enumerator_functions);
+}
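
A chunk for the Database binding, again assuming the host from the as.c sketch. The looked-up address is only an example: lookups that find nothing return nil, and list_networks() hands back an iterator closure over the whole database. location.DATABASE_PATH is set by luaopen_location() in location.c below.

    #include <stdio.h>

    #include <lua.h>
    #include <lauxlib.h>

    // Runs inside the host set up in the sketch after as.c
    static void demo_database(lua_State* L) {
            if (luaL_dostring(L,
                            "local location = require('location')\n"
                            "local db = location.Database.open(location.DATABASE_PATH)\n"
                            "\n"
                            "-- lookup() returns a Network object, or nil if nothing matches\n"
                            "local net = db:lookup('192.0.2.1')\n"
                            "if net then print(tostring(net), net:get_country_code(), net:get_asn()) end\n"
                            "\n"
                            "-- list_networks() returns an iterator closure over all networks\n"
                            "for n in db:list_networks() do print(tostring(n)) end\n"))
                    fprintf(stderr, "%s\n", lua_tostring(L, -1));
    }
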
diff --git a/src/lua/database.h b/src/lua/database.h
new file mode 100644 (file)
index 0000000..6a5aa4d
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LUA_LOCATION_DATABASE_H
+#define LUA_LOCATION_DATABASE_H
+
+#include <lua.h>
+#include <lauxlib.h>
+
+int register_database(lua_State* L);
+int register_database_enumerator(lua_State* L);
+
+#endif /* LUA_LOCATION_DATABASE_H */
diff --git a/src/lua/location.c b/src/lua/location.c
new file mode 100644 (file)
index 0000000..b7d2c0e
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <errno.h>
+#include <string.h>
+
+#include <lua.h>
+#include <lauxlib.h>
+#include <lualib.h>
+
+#include <libloc/libloc.h>
+#include <libloc/network.h>
+
+#include "location.h"
+#include "as.h"
+#include "compat.h"
+#include "country.h"
+#include "database.h"
+#include "network.h"
+
+struct loc_ctx* ctx = NULL;
+
+static int version(lua_State* L) {
+       lua_pushstring(L, PACKAGE_VERSION);
+       return 1;
+}
+
+static const struct luaL_Reg location_functions[] = {
+       { "version", version },
+       { NULL, NULL },
+};
+
+int luaopen_location(lua_State* L) {
+       int r;
+
+       // Initialize the context
+       r = loc_new(&ctx);
+       if (r)
+               return luaL_error(L,
+                       "Could not initialize location context: %s\n", strerror(errno));
+
+       // Register functions
+       luaL_newlib(L, location_functions);
+
+       // Register AS type
+       register_as(L);
+
+       lua_setfield(L, -2, "AS");
+
+       // Register Country type
+       register_country(L);
+
+       lua_setfield(L, -2, "Country");
+
+       // Register Database type
+       register_database(L);
+
+       lua_setfield(L, -2, "Database");
+
+       // Register DatabaseEnumerator type
+       register_database_enumerator(L);
+
+       lua_setfield(L, -2, "DatabaseEnumerator");
+
+       // Register Network type
+       register_network(L);
+
+       lua_setfield(L, -2, "Network");
+
+       // Set DATABASE_PATH
+       lua_pushstring(L, LIBLOC_DEFAULT_DATABASE_PATH);
+       lua_setfield(L, -2, "DATABASE_PATH");
+
+       // Add flags
+       lua_pushnumber(L, LOC_NETWORK_FLAG_ANONYMOUS_PROXY);
+       lua_setfield(L, -2, "NETWORK_FLAG_ANONYMOUS_PROXY");
+
+       lua_pushnumber(L, LOC_NETWORK_FLAG_SATELLITE_PROVIDER);
+       lua_setfield(L, -2, "NETWORK_FLAG_SATELLITE_PROVIDER");
+
+       lua_pushnumber(L, LOC_NETWORK_FLAG_ANYCAST);
+       lua_setfield(L, -2, "NETWORK_FLAG_ANYCAST");
+
+       lua_pushnumber(L, LOC_NETWORK_FLAG_DROP);
+       lua_setfield(L, -2, "NETWORK_FLAG_DROP");
+
+       return 1;
+}
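
Besides the class tables, luaopen_location() exposes a handful of module-level fields; a small chunk listing them, run inside the host from the as.c sketch:

    #include <stdio.h>

    #include <lua.h>
    #include <lauxlib.h>

    static void demo_module(lua_State* L) {
            if (luaL_dostring(L,
                            "local location = require('location')\n"
                            "print(location.version())\n"
                            "print(location.DATABASE_PATH)\n"
                            "print(location.NETWORK_FLAG_ANONYMOUS_PROXY, location.NETWORK_FLAG_SATELLITE_PROVIDER,\n"
                            "      location.NETWORK_FLAG_ANYCAST, location.NETWORK_FLAG_DROP)\n"))
                    fprintf(stderr, "%s\n", lua_tostring(L, -1));
    }
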
diff --git a/src/lua/location.h b/src/lua/location.h
new file mode 100644 (file)
index 0000000..0708988
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LUA_LOCATION_LOCATION_H
+#define LUA_LOCATION_LOCATION_H
+
+#include <lua.h>
+
+#include <libloc/libloc.h>
+
+#include "compat.h"
+
+extern struct loc_ctx* ctx;
+
+int luaopen_location(lua_State* L);
+
+static inline int register_class(lua_State* L,
+               const char* name, const struct luaL_Reg* functions) {
+       // Create a new metatable
+       luaL_newmetatable(L, name);
+
+       // Set functions
+       luaL_setfuncs(L, functions, 0);
+
+       // Configure metatable
+       lua_pushvalue(L, -1);
+       lua_setfield(L, -2, "__index");
+
+       return 1;
+}
+
+#endif /* LUA_LOCATION_LOCATION_H */
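
register_class() makes the metatable its own __index, and luaopen_location() stores that very table as the module field, so location.AS, location.Country etc. double as class table and instance metatable. A small check of that invariant, run inside the host from the as.c sketch:

    #include <stdio.h>

    #include <lua.h>
    #include <lauxlib.h>

    static void demo_register_class(lua_State* L) {
            if (luaL_dostring(L,
                            "local location = require('location')\n"
                            "local as = location.AS.new(65000)\n"
                            "assert(getmetatable(as) == location.AS)\n"
                            "assert(location.AS.__index == location.AS)\n"))
                    fprintf(stderr, "%s\n", lua_tostring(L, -1));
    }
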
diff --git a/src/lua/network.c b/src/lua/network.c
new file mode 100644 (file)
index 0000000..2da6a1d
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/network.h>
+
+#include "location.h"
+#include "compat.h"
+#include "network.h"
+
+typedef struct network {
+       struct loc_network* network;
+} Network;
+
+static Network* luaL_checknetwork(lua_State* L, int i) {
+       void* userdata = luaL_checkudata(L, i, "location.Network");
+
+       // Throw an error if the argument doesn't match
+       luaL_argcheck(L, userdata, i, "Network expected");
+
+       return (Network*)userdata;
+}
+
+int create_network(lua_State* L, struct loc_network* network) {
+       // Allocate a new object
+       Network* self = (Network*)lua_newuserdata(L, sizeof(*self));
+
+       // Set metatable
+       luaL_setmetatable(L, "location.Network");
+
+       // Store network
+       self->network = loc_network_ref(network);
+
+       return 1;
+}
+
+static int Network_new(lua_State* L) {
+       struct loc_network* network = NULL;
+       const char* n = NULL;
+       int r;
+
+       // Fetch the network
+       n = luaL_checkstring(L, 1);
+
+       // Parse the string
+       r = loc_network_new_from_string(ctx, &network, n);
+       if (r)
+               return luaL_error(L, "Could not create network %s: %s\n", n, strerror(errno));
+
+       // Return the network
+       r = create_network(L, network);
+       loc_network_unref(network);
+
+       return r;
+}
+
+static int Network_gc(lua_State* L) {
+       Network* self = luaL_checknetwork(L, 1);
+
+       // Free the network
+       if (self->network) {
+               loc_network_unref(self->network);
+               self->network = NULL;
+       }
+
+       return 0;
+}
+
+static int Network_tostring(lua_State* L) {
+       Network* self = luaL_checknetwork(L, 1);
+
+       // Push string representation of the network
+       lua_pushstring(L, loc_network_str(self->network));
+
+       return 1;
+}
+
+// ASN
+
+static int Network_get_asn(lua_State* L) {
+       Network* self = luaL_checknetwork(L, 1);
+
+       uint32_t asn = loc_network_get_asn(self->network);
+
+       // Push ASN
+       if (asn)
+               lua_pushnumber(L, asn);
+       else
+               lua_pushnil(L);
+
+       return 1;
+}
+
+// Family
+
+static int Network_get_family(lua_State* L) {
+       Network* self = luaL_checknetwork(L, 1);
+
+       // Push family
+       lua_pushnumber(L, loc_network_address_family(self->network));
+
+       return 1;
+}
+
+// Country Code
+
+static int Network_get_country_code(lua_State* L) {
+       Network* self = luaL_checknetwork(L, 1);
+
+       const char* country_code = loc_network_get_country_code(self->network);
+
+       // Push country code
+       if (country_code && *country_code)
+               lua_pushstring(L, country_code);
+       else
+               lua_pushnil(L);
+
+       return 1;
+}
+
+// Has Flag?
+
+static int Network_has_flag(lua_State* L) {
+       Network* self = luaL_checknetwork(L, 1);
+
+       // Fetch flag
+       int flag = luaL_checknumber(L, 2);
+
+       // Push result
+       lua_pushboolean(L, loc_network_has_flag(self->network, flag));
+
+       return 1;
+}
+
+// Subnets
+
+static int Network_subnets(lua_State* L) {
+       struct loc_network* subnet1 = NULL;
+       struct loc_network* subnet2 = NULL;
+       int r;
+
+       Network* self = luaL_checknetwork(L, 1);
+
+       // Make subnets
+       r = loc_network_subnets(self->network, &subnet1, &subnet2);
+       if (r)
+               return luaL_error(L, "Could not create subnets of %s: %s\n",
+                       loc_network_str(self->network), strerror(errno));
+
+       // Create a new table
+       lua_createtable(L, 2, 0);
+
+       // Create the networks & push them onto the table
+       create_network(L, subnet1);
+       loc_network_unref(subnet1);
+       lua_rawseti(L, -2, 1);
+
+       create_network(L, subnet2);
+       loc_network_unref(subnet2);
+       lua_rawseti(L, -2, 2);
+
+       return 1;
+}
+
+// Reverse Pointer
+
+static int Network_reverse_pointer(lua_State* L) {
+       char* rp = NULL;
+
+       Network* self = luaL_checknetwork(L, 1);
+
+       // Fetch the suffix
+       const char* suffix = luaL_optstring(L, 2, NULL);
+
+       // Make the reverse pointer
+       rp = loc_network_reverse_pointer(self->network, suffix);
+       if (!rp) {
+               switch (errno) {
+                       case ENOTSUP:
+                               lua_pushnil(L);
+                               return 1;
+
+                       default:
+                               return luaL_error(L, "Could not create reverse pointer: %s\n", strerror(errno));
+               }
+       }
+
+       // Return the response
+       lua_pushstring(L, rp);
+       free(rp);
+
+       return 1;
+}
+
+static const struct luaL_Reg Network_functions[] = {
+       { "new", Network_new },
+       { "get_asn", Network_get_asn },
+       { "get_country_code", Network_get_country_code },
+       { "get_family", Network_get_family },
+       { "has_flag", Network_has_flag },
+       { "reverse_pointer", Network_reverse_pointer },
+       { "subnets", Network_subnets },
+       { "__gc", Network_gc },
+       { "__tostring", Network_tostring },
+       { NULL, NULL },
+};
+
+int register_network(lua_State* L) {
+       return register_class(L, "location.Network", Network_functions);
+}
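
And a chunk for the Network binding, again inside the host from the as.c sketch; the prefix is an arbitrary documentation network:

    #include <stdio.h>

    #include <lua.h>
    #include <lauxlib.h>

    static void demo_network(lua_State* L) {
            if (luaL_dostring(L,
                            "local location = require('location')\n"
                            "local net = location.Network.new('2001:db8::/32')\n"
                            "\n"
                            "-- subnets() returns the two halves of the network in a table\n"
                            "for _, subnet in ipairs(net:subnets()) do print(tostring(subnet)) end\n"
                            "\n"
                            "-- reverse_pointer() returns nil where the library reports ENOTSUP\n"
                            "print(net:reverse_pointer())\n"
                            "\n"
                            "-- flags are the location.NETWORK_FLAG_* constants from location.c\n"
                            "print(net:has_flag(location.NETWORK_FLAG_DROP))\n"))
                    fprintf(stderr, "%s\n", lua_tostring(L, -1));
    }
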
diff --git a/src/lua/network.h b/src/lua/network.h
new file mode 100644 (file)
index 0000000..130aa2f
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#ifndef LUA_LOCATION_NETWORK_H
+#define LUA_LOCATION_NETWORK_H
+
+#include <lua.h>
+#include <lauxlib.h>
+
+#include <libloc/network.h>
+
+int register_network(lua_State* L);
+
+int create_network(lua_State* L, struct loc_network* network);
+
+#endif /* LUA_LOCATION_NETWORK_H */
diff --git a/src/network-list.c b/src/network-list.c
index 698d3abce98bd3380205e1987881fcef91d0e6df..bca44221c011ec881bc9c95d242057add83f2fa8 100644 (file)
 #include <stdlib.h>
 #include <time.h>
 
-#include <loc/libloc.h>
-#include <loc/network.h>
-#include <loc/private.h>
+#include <libloc/address.h>
+#include <libloc/libloc.h>
+#include <libloc/network.h>
+#include <libloc/private.h>
 
 struct loc_network_list {
        struct loc_ctx* ctx;
@@ -32,14 +33,18 @@ struct loc_network_list {
        size_t size;
 };
 
-static int loc_network_list_grow(struct loc_network_list* list, size_t size) {
+static int loc_network_list_grow(struct loc_network_list* list) {
+       size_t size = list->elements_size * 2;
+       if (size < 1024)
+               size = 1024;
+
        DEBUG(list->ctx, "Growing network list %p by %zu to %zu\n",
                list, size, list->elements_size + size);
 
        struct loc_network** elements = reallocarray(list->elements,
                        list->elements_size + size, sizeof(*list->elements));
        if (!elements)
-               return -errno;
+               return 1;
 
        list->elements = elements;
        list->elements_size += size;
@@ -78,9 +83,6 @@ static void loc_network_list_free(struct loc_network_list* list) {
 }
 
 LOC_EXPORT struct loc_network_list* loc_network_list_unref(struct loc_network_list* list) {
-       if (!list)
-               return NULL;
-
        if (--list->refcount > 0)
                return list;
 
@@ -112,15 +114,12 @@ LOC_EXPORT void loc_network_list_clear(struct loc_network_list* list) {
 
 LOC_EXPORT void loc_network_list_dump(struct loc_network_list* list) {
        struct loc_network* network;
-       char* s;
 
        for (unsigned int i = 0; i < list->size; i++) {
                network = list->elements[i];
 
-               s = loc_network_str(network);
-
-               INFO(list->ctx, "%4d: %s\n", i, s);
-               free(s);
+               INFO(list->ctx, "%4d: %s\n",
+                       i, loc_network_str(network));
        }
 }
 
@@ -223,7 +222,7 @@ LOC_EXPORT int loc_network_list_push(struct loc_network_list* list, struct loc_n
 
        // Check if we have space left
        if (list->size >= list->elements_size) {
-               int r = loc_network_list_grow(list, 64);
+               int r = loc_network_list_grow(list);
                if (r)
                        return r;
        }
@@ -277,6 +276,29 @@ LOC_EXPORT struct loc_network* loc_network_list_pop_first(struct loc_network_lis
        return network;
 }
 
+int loc_network_list_remove(struct loc_network_list* list, struct loc_network* network) {
+       int found = 0;
+
+       // Find the network on the list
+       off_t index = loc_network_list_find(list, network, &found);
+
+       // Nothing to do if the network wasn't found
+       if (!found)
+               return 0;
+
+       // Dereference the network at the position
+       loc_network_unref(list->elements[index]);
+
+       // Move all other elements back
+       for (unsigned int i = index; i < list->size - 1; i++)
+               list->elements[i] = list->elements[i+1];
+
+       // The list is shorter now
+       --list->size;
+
+       return 0;
+}
+
 LOC_EXPORT int loc_network_list_contains(struct loc_network_list* list, struct loc_network* network) {
        int found = 0;
 
@@ -297,3 +319,121 @@ LOC_EXPORT int loc_network_list_merge(
 
        return 0;
 }
+
+int loc_network_list_summarize(struct loc_ctx* ctx,
+               const struct in6_addr* first, const struct in6_addr* last, struct loc_network_list** list) {
+       int r;
+
+       if (!list) {
+               errno = EINVAL;
+               return 1;
+       }
+
+       DEBUG(ctx, "Summarizing %s - %s\n", loc_address_str(first), loc_address_str(last));
+
+       const int family1 = loc_address_family(first);
+       const int family2 = loc_address_family(last);
+
+       // Check if address families match
+       if (family1 != family2) {
+               ERROR(ctx, "Address families do not match\n");
+               errno = EINVAL;
+               return 1;
+       }
+
+       // Check if the last address is larger than the first address
+       if (loc_address_cmp(first, last) >= 0) {
+               ERROR(ctx, "The first address must be smaller than the last address\n");
+               errno = EINVAL;
+               return 1;
+       }
+
+       struct loc_network* network = NULL;
+       struct in6_addr start = *first;
+
+       const int family_bit_length = loc_address_family_bit_length(family1);
+
+       while (loc_address_cmp(&start, last) <= 0) {
+               struct in6_addr num;
+               int bits1;
+
+               // Find the number of trailing zeroes of the start address
+               if (loc_address_all_zeroes(&start))
+                       bits1 = family_bit_length;
+               else {
+                       bits1 = loc_address_count_trailing_zero_bits(&start);
+                       if (bits1 > family_bit_length)
+                               bits1 = family_bit_length;
+               }
+
+               // Subtract the start address from the last address and add one
+               // (i.e. how many addresses are in this network?)
+               r = loc_address_sub(&num, last, &start);
+               if (r)
+                       return r;
+
+               loc_address_increment(&num);
+
+               // How many bits do we need to represent this address?
+               int bits2 = loc_address_bit_length(&num) - 1;
+
+               // Select the smaller one
+               int bits = (bits1 > bits2) ? bits2 : bits1;
+
+               // Create a network
+               r = loc_network_new(ctx, &network, &start, family_bit_length - bits);
+               if (r)
+                       return r;
+
+               DEBUG(ctx, "Found network %s\n", loc_network_str(network));
+
+               // Push network on the list
+               r = loc_network_list_push(*list, network);
+               if (r) {
+                       loc_network_unref(network);
+                       return r;
+               }
+
+               // The next network starts right after this one
+               start = *loc_network_get_last_address(network);
+
+               // If we have reached the end of possible IP addresses, we stop
+               if (loc_address_all_ones(&start))
+                       break;
+
+               loc_address_increment(&start);
+       }
+
+       return 0;
+}
+
+void loc_network_list_remove_with_prefix_smaller_than(
+               struct loc_network_list* list, const unsigned int prefix) {
+       unsigned int p = 0;
+
+       // Count how many networks were removed
+       unsigned int removed = 0;
+
+       for (unsigned int i = 0; i < list->size; i++) {
+               // Fetch the prefix
+               p = loc_network_prefix(list->elements[i]);
+
+               if (p > prefix) {
+                       // Drop this network
+                       loc_network_unref(list->elements[i]);
+
+                       // Increment counter
+                       removed++;
+
+                       continue;
+               }
+
+               // Move pointers backwards to keep the list filled
+               list->elements[i - removed] = list->elements[i];
+       }
+
+       // Adjust size
+       list->size -= removed;
+
+       return;
+}
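
loc_network_list_summarize() fills a caller-supplied list with the networks that exactly cover an arbitrary first..last range. It is not marked LOC_EXPORT, so the hedged sketch below is only meaningful inside the library or its test suite; error handling is trimmed and the address range is arbitrary.

    #include <arpa/inet.h>
    #include <netinet/in.h>

    #include <libloc/libloc.h>
    #include <libloc/network-list.h>

    // Internal-only sketch: summarize an arbitrary range into CIDR networks
    static void summarize_example(struct loc_ctx* ctx) {
            struct loc_network_list* list = NULL;
            struct in6_addr first, last;

            // The caller provides the (empty) list that summarize() pushes onto
            if (loc_network_list_new(ctx, &list))
                    return;

            inet_pton(AF_INET6, "2001:db8::",    &first);
            inet_pton(AF_INET6, "2001:db8::1:0", &last);

            if (!loc_network_list_summarize(ctx, &first, &last, &list))
                    loc_network_list_dump(list);   // logs one line per covering network

            loc_network_list_unref(list);
    }
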
diff --git a/src/network-tree.c b/src/network-tree.c
new file mode 100644 (file)
index 0000000..7096625
--- /dev/null
@@ -0,0 +1,662 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2024 IPFire Development Team <info@ipfire.org>
+
+       This library is free software; you can redistribute it and/or
+       modify it under the terms of the GNU Lesser General Public
+       License as published by the Free Software Foundation; either
+       version 2.1 of the License, or (at your option) any later version.
+
+       This library is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+       Lesser General Public License for more details.
+*/
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include <libloc/libloc.h>
+#include <libloc/address.h>
+#include <libloc/network-tree.h>
+#include <libloc/private.h>
+
+struct loc_network_tree {
+       struct loc_ctx* ctx;
+       int refcount;
+
+       struct loc_network_tree_node* root;
+};
+
+struct loc_network_tree_node {
+       struct loc_ctx* ctx;
+       int refcount;
+
+       struct loc_network_tree_node* zero;
+       struct loc_network_tree_node* one;
+
+       struct loc_network* network;
+
+       // Set if deleted
+       int deleted:1;
+};
+
+int loc_network_tree_new(struct loc_ctx* ctx, struct loc_network_tree** tree) {
+       struct loc_network_tree* t = calloc(1, sizeof(*t));
+       if (!t)
+               return 1;
+
+       t->ctx = loc_ref(ctx);
+       t->refcount = 1;
+
+       // Create the root node
+       int r = loc_network_tree_node_new(ctx, &t->root);
+       if (r) {
+               loc_network_tree_unref(t);
+               return r;
+       }
+
+       DEBUG(t->ctx, "Network tree allocated at %p\n", t);
+       *tree = t;
+       return 0;
+}
+
+struct loc_network_tree_node* loc_network_tree_get_root(struct loc_network_tree* tree) {
+       return loc_network_tree_node_ref(tree->root);
+}
+
+static struct loc_network_tree_node* loc_network_tree_get_node(struct loc_network_tree_node* node, int path) {
+       struct loc_network_tree_node** n = NULL;
+       int r;
+
+       switch (path) {
+               case 0:
+                       n = &node->zero;
+                       break;
+
+               case 1:
+                       n = &node->one;
+                       break;
+
+               default:
+                       errno = EINVAL;
+                       return NULL;
+       }
+
+       // If the node existed, but has been deleted, we undelete it
+       if (*n && (*n)->deleted) {
+               (*n)->deleted = 0;
+
+       // If the desired node doesn't exist yet, we will create it
+       } else if (!*n) {
+               r = loc_network_tree_node_new(node->ctx, n);
+               if (r)
+                       return NULL;
+       }
+
+       return *n;
+}
+
+static struct loc_network_tree_node* loc_network_tree_get_path(struct loc_network_tree* tree, const struct in6_addr* address, unsigned int prefix) {
+       struct loc_network_tree_node* node = tree->root;
+
+       for (unsigned int i = 0; i < prefix; i++) {
+               // Check if the ith bit is one or zero
+               node = loc_network_tree_get_node(node, loc_address_get_bit(address, i));
+       }
+
+       return node;
+}
+
+static int __loc_network_tree_walk(struct loc_ctx* ctx, struct loc_network_tree_node* node,
+               int(*filter_callback)(struct loc_network* network, void* data),
+               int(*callback)(struct loc_network* network, void* data), void* data) {
+       int r;
+
+       // If the node has been deleted, don't process it
+       if (node->deleted)
+               return 0;
+
+       // Finding a network ends the walk here
+       if (node->network) {
+               if (filter_callback) {
+                       int f = filter_callback(node->network, data);
+                       if (f < 0)
+                               return f;
+
+                       // Skip network if filter function returns value greater than zero
+                       if (f > 0)
+                               return 0;
+               }
+
+               r = callback(node->network, data);
+               if (r)
+                       return r;
+       }
+
+       // Walk down on the left side of the tree first
+       if (node->zero) {
+               r = __loc_network_tree_walk(ctx, node->zero, filter_callback, callback, data);
+               if (r)
+                       return r;
+       }
+
+       // Then walk on the other side
+       if (node->one) {
+               r = __loc_network_tree_walk(ctx, node->one, filter_callback, callback, data);
+               if (r)
+                       return r;
+       }
+
+       return 0;
+}
+
+int loc_network_tree_walk(struct loc_network_tree* tree,
+               int(*filter_callback)(struct loc_network* network, void* data),
+               int(*callback)(struct loc_network* network, void* data), void* data) {
+       return __loc_network_tree_walk(tree->ctx, tree->root, filter_callback, callback, data);
+}
+
+static void loc_network_tree_free(struct loc_network_tree* tree) {
+       DEBUG(tree->ctx, "Releasing network tree at %p\n", tree);
+
+       loc_network_tree_node_unref(tree->root);
+
+       loc_unref(tree->ctx);
+       free(tree);
+}
+
+struct loc_network_tree* loc_network_tree_unref(struct loc_network_tree* tree) {
+       if (--tree->refcount > 0)
+               return tree;
+
+       loc_network_tree_free(tree);
+       return NULL;
+}
+
+static int __loc_network_tree_dump(struct loc_network* network, void* data) {
+       struct loc_ctx* ctx = data;
+
+       DEBUG(ctx, "Dumping network at %p\n", network);
+
+       const char* s = loc_network_str(network);
+       if (!s)
+               return 1;
+
+       INFO(ctx, "%s\n", s);
+
+       return 0;
+}
+
+int loc_network_tree_dump(struct loc_network_tree* tree) {
+       DEBUG(tree->ctx, "Dumping network tree at %p\n", tree);
+
+       return loc_network_tree_walk(tree, NULL, __loc_network_tree_dump, tree->ctx);
+}
+
+int loc_network_tree_add_network(struct loc_network_tree* tree, struct loc_network* network) {
+       DEBUG(tree->ctx, "Adding network %p to tree %p\n", network, tree);
+
+       const struct in6_addr* first_address = loc_network_get_first_address(network);
+       const unsigned int prefix = loc_network_raw_prefix(network);
+
+       struct loc_network_tree_node* node = loc_network_tree_get_path(tree, first_address, prefix);
+       if (!node) {
+               ERROR(tree->ctx, "Could not find a node\n");
+               return -ENOMEM;
+       }
+
+       // Check if node has not been set before
+       if (node->network) {
+               DEBUG(tree->ctx, "There is already a network at this path: %s\n",
+                       loc_network_str(node->network));
+               return -EBUSY;
+       }
+
+       // Point node to the network
+       node->network = loc_network_ref(network);
+
+       return 0;
+}
+
+static int loc_network_tree_delete_network(
+               struct loc_network_tree* tree, struct loc_network* network) {
+       struct loc_network_tree_node* node = NULL;
+
+       DEBUG(tree->ctx, "Deleting network %s from tree...\n", loc_network_str(network));
+
+       const struct in6_addr* first_address = loc_network_get_first_address(network);
+       const unsigned int prefix = loc_network_raw_prefix(network);
+
+       node = loc_network_tree_get_path(tree, first_address, prefix);
+       if (!node) {
+               ERROR(tree->ctx, "Network was not found in tree %s\n", loc_network_str(network));
+               return 1;
+       }
+
+       // Drop the network
+       if (node->network) {
+               loc_network_unref(node->network);
+               node->network = NULL;
+       }
+
+       // Mark the node as deleted if it was a leaf
+       if (!node->zero && !node->one)
+               node->deleted = 1;
+
+       return 0;
+}
+
+static size_t __loc_network_tree_count_nodes(struct loc_network_tree_node* node) {
+       size_t counter = 1;
+
+       // Don't count deleted nodes
+       if (node->deleted)
+               return 0;
+
+       if (node->zero)
+               counter += __loc_network_tree_count_nodes(node->zero);
+
+       if (node->one)
+               counter += __loc_network_tree_count_nodes(node->one);
+
+       return counter;
+}
+
+size_t loc_network_tree_count_nodes(struct loc_network_tree* tree) {
+       return __loc_network_tree_count_nodes(tree->root);
+}
+
+int loc_network_tree_node_new(struct loc_ctx* ctx, struct loc_network_tree_node** node) {
+       struct loc_network_tree_node* n = calloc(1, sizeof(*n));
+       if (!n)
+               return -ENOMEM;
+
+       n->ctx = loc_ref(ctx);
+       n->refcount = 1;
+
+       n->zero = n->one = NULL;
+
+       DEBUG(n->ctx, "Network node allocated at %p\n", n);
+       *node = n;
+       return 0;
+}
+
+struct loc_network_tree_node* loc_network_tree_node_ref(struct loc_network_tree_node* node) {
+       if (node)
+               node->refcount++;
+
+       return node;
+}
+
+static void loc_network_tree_node_free(struct loc_network_tree_node* node) {
+       DEBUG(node->ctx, "Releasing network node at %p\n", node);
+
+       if (node->network)
+               loc_network_unref(node->network);
+
+       if (node->zero)
+               loc_network_tree_node_unref(node->zero);
+
+       if (node->one)
+               loc_network_tree_node_unref(node->one);
+
+       loc_unref(node->ctx);
+       free(node);
+}
+
+struct loc_network_tree_node* loc_network_tree_node_unref(struct loc_network_tree_node* node) {
+       if (--node->refcount > 0)
+               return node;
+
+       loc_network_tree_node_free(node);
+       return NULL;
+}
+
+struct loc_network_tree_node* loc_network_tree_node_get(struct loc_network_tree_node* node, unsigned int index) {
+       if (index == 0)
+               node = node->zero;
+       else
+               node = node->one;
+
+       if (!node)
+               return NULL;
+
+       return loc_network_tree_node_ref(node);
+}
+
+int loc_network_tree_node_is_leaf(struct loc_network_tree_node* node) {
+       return (!!node->network);
+}
+
+struct loc_network* loc_network_tree_node_get_network(struct loc_network_tree_node* node) {
+       return loc_network_ref(node->network);
+}
+
+/*
+       Merge the tree!
+*/
+
+struct loc_network_tree_merge_ctx {
+       struct loc_network_tree* tree;
+       struct loc_network_list* networks;
+       unsigned int merged;
+};
+
+static int loc_network_tree_merge_step(struct loc_network* network, void* data) {
+       struct loc_network_tree_merge_ctx* ctx = (struct loc_network_tree_merge_ctx*)data;
+       struct loc_network* n = NULL;
+       struct loc_network* m = NULL;
+       int r;
+
+       // How many networks do we have?
+       size_t i = loc_network_list_size(ctx->networks);
+
+       // If the list is empty, just add the network
+       if (i == 0)
+               return loc_network_list_push(ctx->networks, network);
+
+       while (i--) {
+               // Fetch the last network of the list
+               n = loc_network_list_get(ctx->networks, i);
+
+               // Try to merge the two networks
+               r = loc_network_merge(&m, n, network);
+               if (r)
+                       goto ERROR;
+
+               // Did we get a result?
+               if (m) {
+                       DEBUG(ctx->tree->ctx, "Merged networks %s + %s -> %s\n",
+                               loc_network_str(n), loc_network_str(network), loc_network_str(m));
+
+                       // Add the new network
+                       r = loc_network_tree_add_network(ctx->tree, m);
+                       switch (r) {
+                               case 0:
+                                       break;
+
+                               // There might already be a network
+                               case -EBUSY:
+                                       r = 0;
+                                       goto ERROR;
+
+                               default:
+                                       goto ERROR;
+                       }
+
+                       // Remove the merged networks
+                       r = loc_network_tree_delete_network(ctx->tree, network);
+                       if (r)
+                               goto ERROR;
+
+                       r = loc_network_tree_delete_network(ctx->tree, n);
+                       if (r)
+                               goto ERROR;
+
+                       // Add the new network to the stack
+                       r = loc_network_list_push(ctx->networks, m);
+                       if (r)
+                               goto ERROR;
+
+                       // Remove the previous network from the stack
+                       r = loc_network_list_remove(ctx->networks, n);
+                       if (r)
+                               goto ERROR;
+
+                       // Count merges
+                       ctx->merged++;
+
+                       // Try merging the new network with others
+                       r = loc_network_tree_merge_step(m, data);
+                       if (r)
+                               goto ERROR;
+
+                       loc_network_unref(m);
+                       m = NULL;
+
+                       // Once we have found a merge, we are done
+                       break;
+
+               // If we could not merge the two networks, we add the current one
+               } else {
+                       r = loc_network_list_push(ctx->networks, network);
+                       if (r)
+                               goto ERROR;
+               }
+
+               loc_network_unref(n);
+               n = NULL;
+       }
+
+       const unsigned int prefix = loc_network_prefix(network);
+
+       // Remove any networks that we cannot merge
+       loc_network_list_remove_with_prefix_smaller_than(ctx->networks, prefix);
+
+ERROR:
+       if (m)
+               loc_network_unref(m);
+       if (n)
+               loc_network_unref(n);
+
+       return r;
+}
+
+static int loc_network_tree_merge(struct loc_network_tree* tree) {
+       struct loc_network_tree_merge_ctx ctx = {
+               .tree     = tree,
+               .networks = NULL,
+               .merged   = 0,
+       };
+       int r;
+
+       // Create a new list
+       r = loc_network_list_new(tree->ctx, &ctx.networks);
+       if (r)
+               goto ERROR;
+
+       // Walk through the entire tree
+       r = loc_network_tree_walk(tree, NULL, loc_network_tree_merge_step, &ctx);
+       if (r)
+               goto ERROR;
+
+       DEBUG(tree->ctx, "%u network(s) have been merged\n", ctx.merged);
+
+ERROR:
+       if (ctx.networks)
+               loc_network_list_unref(ctx.networks);
+
+       return r;
+}
+
+/*
+       Deduplicate the tree
+*/
+
+struct loc_network_tree_dedup_ctx {
+       struct loc_network_tree* tree;
+       struct loc_network_list* stack;
+       unsigned int* removed;
+       int family;
+};
+
+static int loc_network_tree_dedup_step(struct loc_network* network, void* data) {
+       struct loc_network_tree_dedup_ctx* ctx = (struct loc_network_tree_dedup_ctx*)data;
+       struct loc_network* n = NULL;
+       int r;
+
+       // Walk through all networks on the stack...
+       for (int i = loc_network_list_size(ctx->stack) - 1; i >= 0; i--) {
+               n = loc_network_list_get(ctx->stack, i);
+
+               // Is network a subnet?
+               if (loc_network_is_subnet(n, network)) {
+                       // Do all properties match?
+                       if (loc_network_properties_cmp(n, network) == 0) {
+                               r = loc_network_tree_delete_network(ctx->tree, network);
+                               if (r)
+                                       goto END;
+
+                               // Count
+                               (*ctx->removed)++;
+
+                               // Once we removed the subnet, we are done
+                               goto END;
+                       }
+
+                       // Once we found a subnet, we are done
+                       break;
+               }
+
+               // If the network wasn't a subnet, we can drop it from the stack,
+               // because no later network can be a subnet of it either.
+               r = loc_network_list_remove(ctx->stack, n);
+               if (r)
+                       goto END;
+
+               loc_network_unref(n);
+               n = NULL;
+       }
+
+       // If the network was not removed, we push it onto the stack
+       r = loc_network_list_push(ctx->stack, network);
+       if (r)
+               return r;
+
+END:
+       if (n)
+               loc_network_unref(n);
+
+       return r;
+}
+
+static int loc_network_tree_dedup_filter(struct loc_network* network, void* data) {
+       const struct loc_network_tree_dedup_ctx* ctx = data;
+
+       // Match address family
+       return ctx->family == loc_network_address_family(network);
+}
+
+static int loc_network_tree_dedup_one(struct loc_network_tree* tree,
+               const int family, unsigned int* removed) {
+       struct loc_network_tree_dedup_ctx ctx = {
+               .tree    = tree,
+               .stack   = NULL,
+               .removed = removed,
+               .family  = family,
+       };
+       int r;
+
+       r = loc_network_list_new(tree->ctx, &ctx.stack);
+       if (r)
+               return r;
+
+       // Walk through the entire tree
+       r = loc_network_tree_walk(tree,
+               loc_network_tree_dedup_filter, loc_network_tree_dedup_step, &ctx);
+       if (r)
+               goto ERROR;
+
+ERROR:
+       if (ctx.stack)
+               loc_network_list_unref(ctx.stack);
+
+       return r;
+}
+
+static int loc_network_tree_dedup(struct loc_network_tree* tree) {
+       unsigned int removed = 0;
+       int r;
+
+       r = loc_network_tree_dedup_one(tree, AF_INET6, &removed);
+       if (r)
+               return r;
+
+       r = loc_network_tree_dedup_one(tree, AF_INET, &removed);
+       if (r)
+               return r;
+
+       DEBUG(tree->ctx, "%u network(s) have been removed\n", removed);
+
+       return 0;
+}
+
+static int loc_network_tree_delete_node(struct loc_network_tree* tree,
+               struct loc_network_tree_node** node) {
+       struct loc_network_tree_node* n = *node;
+       int r0 = 1;
+       int r1 = 1;
+
+       // Return for nodes that have already been deleted
+       if (n->deleted)
+               goto DELETE;
+
+       // Delete zero
+       if (n->zero) {
+               r0 = loc_network_tree_delete_node(tree, &n->zero);
+               if (r0 < 0)
+                       return r0;
+       }
+
+       // Delete one
+       if (n->one) {
+               r1 = loc_network_tree_delete_node(tree, &n->one);
+               if (r1 < 0)
+                       return r1;
+       }
+
+       // Don't delete this node if we are a leaf
+       if (n->network)
+               return 0;
+
+       // Don't delete this node if it has child nodes that we need
+       if (!r0 || !r1)
+               return 0;
+
+       // Don't delete root
+       if (tree->root == n)
+               return 0;
+
+DELETE:
+       // It is now safe to delete the node
+       loc_network_tree_node_unref(n);
+       *node = NULL;
+
+       return 1;
+}
+
+static int loc_network_tree_delete_nodes(struct loc_network_tree* tree) {
+       int r;
+
+       r = loc_network_tree_delete_node(tree, &tree->root);
+       if (r < 0)
+               return r;
+
+       return 0;
+}
+
+int loc_network_tree_cleanup(struct loc_network_tree* tree) {
+       int r;
+
+       // Deduplicate the tree
+       r = loc_network_tree_dedup(tree);
+       if (r)
+               return r;
+
+       // Merge networks
+       r = loc_network_tree_merge(tree);
+       if (r) {
+               ERROR(tree->ctx, "Could not merge networks: %m\n");
+               return r;
+       }
+
+       // Delete any unneeded nodes
+       r = loc_network_tree_delete_nodes(tree);
+       if (r)
+               return r;
+
+       return 0;
+}
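Taken together, loc_network_tree_cleanup() chains deduplication, merging and node pruning over the walk callbacks above. The following is only a sketch of how a caller might drive it, assuming the tree constructor and unref helper keep the signatures of the implementation removed from network.c below and are exported through the new libloc/network-tree.h header; the function name and input networks are invented for illustration:

#include <libloc/libloc.h>
#include <libloc/network.h>
#include <libloc/network-tree.h>

/* Sketch only: build a tiny tree and clean it up.
   Error handling is abbreviated. */
static int example_cleanup(struct loc_ctx* ctx) {
        struct loc_network_tree* tree = NULL;
        struct loc_network* network = NULL;
        int r;

        r = loc_network_tree_new(ctx, &tree);
        if (r)
                return r;

        // Two adjacent /24s with identical (empty) properties...
        const char* subnets[] = { "10.0.0.0/24", "10.0.1.0/24", NULL };

        for (const char** s = subnets; *s; s++) {
                r = loc_network_new_from_string(ctx, &network, *s);
                if (r)
                        goto ERROR;

                // The tree takes its own reference
                r = loc_network_tree_add_network(tree, network);
                loc_network_unref(network);
                if (r)
                        goto ERROR;
        }

        // ...which dedup/merge should collapse into a single 10.0.0.0/23
        r = loc_network_tree_cleanup(tree);

ERROR:
        loc_network_tree_unref(tree);
        return r;
}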
index a6b679c84f4e8909ff737d84fb353cf7dbe19479..69306a91f4b18ed0be8f4280373b34bbb7496c81 100644 (file)
 #  include <endian.h>
 #endif
 
-#include <loc/libloc.h>
-#include <loc/compat.h>
-#include <loc/country.h>
-#include <loc/network.h>
-#include <loc/network-list.h>
-#include <loc/private.h>
+#include <libloc/libloc.h>
+#include <libloc/address.h>
+#include <libloc/compat.h>
+#include <libloc/country.h>
+#include <libloc/network.h>
+#include <libloc/network-list.h>
+#include <libloc/private.h>
 
 struct loc_network {
        struct loc_ctx* ctx;
@@ -44,185 +45,64 @@ struct loc_network {
        char country_code[3];
        uint32_t asn;
        enum loc_network_flags flags;
-};
-
-static int valid_prefix(struct in6_addr* address, unsigned int prefix) {
-       // The prefix cannot be larger than 128 bits
-       if (prefix > 128)
-               return 1;
-
-       // And the prefix cannot be zero
-       if (prefix == 0)
-               return 1;
-
-       // For IPv4-mapped addresses the prefix has to be 96 or lager
-       if (IN6_IS_ADDR_V4MAPPED(address) && prefix <= 96)
-               return 1;
-
-       return 0;
-}
-
-static struct in6_addr prefix_to_bitmask(unsigned int prefix) {
-       struct in6_addr bitmask;
-
-       for (unsigned int i = 0; i < 16; i++)
-               bitmask.s6_addr[i] = 0;
-
-       for (int i = prefix, j = 0; i > 0; i -= 8, j++) {
-               if (i >= 8)
-                       bitmask.s6_addr[j] = 0xff;
-               else
-                       bitmask.s6_addr[j] = 0xff << (8 - i);
-       }
-
-       return bitmask;
-}
-
-static struct in6_addr make_first_address(const struct in6_addr* address, const struct in6_addr* bitmask) {
-       struct in6_addr a;
-
-       // Perform bitwise AND
-       for (unsigned int i = 0; i < 4; i++)
-               a.s6_addr32[i] = address->s6_addr32[i] & bitmask->s6_addr32[i];
-
-       return a;
-}
-
-static struct in6_addr make_last_address(const struct in6_addr* address, const struct in6_addr* bitmask) {
-       struct in6_addr a;
-
-       // Perform bitwise OR
-       for (unsigned int i = 0; i < 4; i++)
-               a.s6_addr32[i] = address->s6_addr32[i] | ~bitmask->s6_addr32[i];
-
-       return a;
-}
-
-static struct in6_addr address_increment(const struct in6_addr* address) {
-       struct in6_addr a = *address;
-
-       for (int octet = 15; octet >= 0; octet--) {
-               if (a.s6_addr[octet] < 255) {
-                       a.s6_addr[octet]++;
-                       break;
-               } else {
-                       a.s6_addr[octet] = 0;
-               }
-       }
 
-       return a;
-}
+       char string[INET6_ADDRSTRLEN + 4];
+};
 
 LOC_EXPORT int loc_network_new(struct loc_ctx* ctx, struct loc_network** network,
                struct in6_addr* address, unsigned int prefix) {
-       // Address cannot be unspecified
-       if (IN6_IS_ADDR_UNSPECIFIED(address)) {
-               DEBUG(ctx, "Start address is unspecified\n");
-               return -EINVAL;
-       }
-
-       // Address cannot be loopback
-       if (IN6_IS_ADDR_LOOPBACK(address)) {
-               DEBUG(ctx, "Start address is loopback address\n");
-               return -EINVAL;
-       }
-
-       // Address cannot be link-local
-       if (IN6_IS_ADDR_LINKLOCAL(address)) {
-               DEBUG(ctx, "Start address cannot be link-local\n");
-               return -EINVAL;
-       }
-
-       // Address cannot be site-local
-       if (IN6_IS_ADDR_SITELOCAL(address)) {
-               DEBUG(ctx, "Start address cannot be site-local\n");
-               return -EINVAL;
-       }
+       struct loc_network* n = NULL;
 
        // Validate the prefix
-       if (valid_prefix(address, prefix) != 0) {
-               DEBUG(ctx, "Invalid prefix: %u\n", prefix);
-               return -EINVAL;
+       if (!loc_address_valid_prefix(address, prefix)) {
+               ERROR(ctx, "Invalid prefix in %s: %u\n", loc_address_str(address), prefix);
+               errno = EINVAL;
+               return 1;
        }
 
-       struct loc_network* n = calloc(1, sizeof(*n));
+       // Allocate a new network
+       n = calloc(1, sizeof(*n));
        if (!n)
-               return -ENOMEM;
+               return 1;
 
        n->ctx = loc_ref(ctx);
        n->refcount = 1;
 
        // Store the prefix
-       n->prefix = prefix;
+       if (IN6_IS_ADDR_V4MAPPED(address))
+               n->prefix = prefix + 96;
+       else
+               n->prefix = prefix;
 
        // Convert the prefix into a bitmask
-       struct in6_addr bitmask = prefix_to_bitmask(n->prefix);
+       const struct in6_addr bitmask = loc_prefix_to_bitmask(n->prefix);
 
        // Store the first and last address in the network
-       n->first_address = make_first_address(address, &bitmask);
-       n->last_address = make_last_address(&n->first_address, &bitmask);
+       n->first_address = loc_address_and(address, &bitmask);
+       n->last_address  = loc_address_or(&n->first_address, &bitmask);
 
        // Set family
-       if (IN6_IS_ADDR_V4MAPPED(&n->first_address))
-               n->family = AF_INET;
-       else
-               n->family = AF_INET6;
+       n->family = loc_address_family(&n->first_address);
 
        DEBUG(n->ctx, "Network allocated at %p\n", n);
        *network = n;
        return 0;
 }
 
-LOC_EXPORT int loc_network_new_from_string(struct loc_ctx* ctx, struct loc_network** network,
-               const char* address_string) {
-       struct in6_addr first_address;
-       char* prefix_string;
-       unsigned int prefix = 128;
-       int r = -EINVAL;
-
-       DEBUG(ctx, "Attempting to parse network %s\n", address_string);
-
-       // Make a copy of the string to work on it
-       char* buffer = strdup(address_string);
-       address_string = prefix_string = buffer;
-
-       // Split address and prefix
-       address_string = strsep(&prefix_string, "/");
-
-       DEBUG(ctx, "  Split into address = %s, prefix = %s\n", address_string, prefix_string);
+LOC_EXPORT int loc_network_new_from_string(struct loc_ctx* ctx,
+               struct loc_network** network, const char* string) {
+       struct in6_addr address;
+       unsigned int prefix;
 
-       // Parse the address
-       r = loc_parse_address(ctx, address_string, &first_address);
+       // Parse the input
+       int r = loc_address_parse(&address, &prefix, string);
        if (r) {
-               DEBUG(ctx, "The address could not be parsed\n");
-               goto FAIL;
-       }
-
-       // If a prefix was given, we will try to parse it
-       if (prefix_string) {
-               // Convert prefix to integer
-               prefix = strtol(prefix_string, NULL, 10);
-
-               if (!prefix) {
-                       DEBUG(ctx, "The prefix was not parsable: %s\n", prefix_string);
-                       goto FAIL;
-               }
-
-               // Map the prefix to IPv6 if needed
-               if (IN6_IS_ADDR_V4MAPPED(&first_address))
-                       prefix += 96;
-       }
-
-FAIL:
-       // Free temporary buffer
-       free(buffer);
-
-       // Exit if the parsing was unsuccessful
-       if (r)
+               ERROR(ctx, "Could not parse network %s: %m\n", string);
                return r;
+       }
 
        // Create a new network
-       return loc_network_new(ctx, network, &first_address, prefix);
+       return loc_network_new(ctx, network, &address, prefix);
 }
 
 LOC_EXPORT struct loc_network* loc_network_ref(struct loc_network* network) {
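Two things follow from the rewritten constructor and parser for callers: errors are now reported by returning non-zero with errno set (hence the %m in the ERROR calls), and the string representation is cached inside the object (see the loc_network_str() change further down), so it must no longer be free()d. A hypothetical usage sketch, assuming an existing struct loc_ctx:

#include <stdio.h>

#include <libloc/libloc.h>
#include <libloc/network.h>

/* Sketch: parse a network and print its cached string representation.
   loc_network_str() returns a const char* owned by the network object. */
static int print_network(struct loc_ctx* ctx, const char* s) {
        struct loc_network* network = NULL;

        int r = loc_network_new_from_string(ctx, &network, s);
        if (r)
                return r; // errno describes what went wrong

        // For "192.0.2.0/24" this should print "192.0.2.0/24 (prefix 24)";
        // internally the prefix is stored relative to 128 bits.
        printf("%s (prefix %u)\n",
                loc_network_str(network), loc_network_prefix(network));

        loc_network_unref(network);
        return 0;
}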
@@ -239,9 +119,6 @@ static void loc_network_free(struct loc_network* network) {
 }
 
 LOC_EXPORT struct loc_network* loc_network_unref(struct loc_network* network) {
-       if (!network)
-               return NULL;
-
        if (--network->refcount > 0)
                return network;
 
@@ -249,61 +126,27 @@ LOC_EXPORT struct loc_network* loc_network_unref(struct loc_network* network) {
        return NULL;
 }
 
-static int format_ipv6_address(const struct in6_addr* address, char* string, size_t length) {
-       const char* ret = inet_ntop(AF_INET6, address, string, length);
-       if (!ret)
-               return -1;
-
-       return 0;
-}
-
-static int format_ipv4_address(const struct in6_addr* address, char* string, size_t length) {
-       struct in_addr ipv4_address;
-       ipv4_address.s_addr = address->s6_addr32[3];
-
-       const char* ret = inet_ntop(AF_INET, &ipv4_address, string, length);
-       if (!ret)
-               return -1;
-
-       return 0;
-}
-
-LOC_EXPORT char* loc_network_str(struct loc_network* network) {
-       int r;
-       const size_t length = INET6_ADDRSTRLEN + 4;
-
-       char* string = malloc(length);
-       if (!string)
-               return NULL;
-
-       unsigned int prefix = network->prefix;
-
-       switch (network->family) {
-               case AF_INET6:
-                       r = format_ipv6_address(&network->first_address, string, length);
-                       break;
-
-               case AF_INET:
-                       r = format_ipv4_address(&network->first_address, string, length);
-                       prefix -= 96;
-                       break;
-
-               default:
-                       r = -1;
-                       break;
-       }
+LOC_EXPORT const char* loc_network_str(struct loc_network* network) {
+       if (!*network->string) {
+               // Format the address
+               const char* address = loc_address_str(&network->first_address);
+               if (!address)
+                       return NULL;
 
-       if (r) {
-               ERROR(network->ctx, "Could not convert network to string: %s\n", strerror(errno));
-               free(string);
+               // Fetch the prefix
+               unsigned int prefix = loc_network_prefix(network);
 
-               return NULL;
+               // Format the string
+               int r = snprintf(network->string, sizeof(network->string) - 1,
+                       "%s/%u", address, prefix);
+               if (r < 0) {
+                       ERROR(network->ctx, "Could not format network string: %m\n");
+                       *network->string = '\0';
+                       return NULL;
+               }
        }
 
-       // Append prefix
-       sprintf(string + strlen(string), "/%u", prefix);
-
-       return string;
+       return network->string;
 }
 
 LOC_EXPORT int loc_network_address_family(struct loc_network* network) {
@@ -322,62 +165,33 @@ LOC_EXPORT unsigned int loc_network_prefix(struct loc_network* network) {
        return 0;
 }
 
-static char* loc_network_format_address(struct loc_network* network, const struct in6_addr* address) {
-       const size_t length = INET6_ADDRSTRLEN;
-
-       char* string = malloc(length);
-       if (!string)
-               return NULL;
-
-       int r = 0;
-
-       switch (network->family) {
-               case AF_INET6:
-                       r = format_ipv6_address(address, string, length);
-                       break;
-
-               case AF_INET:
-                       r = format_ipv4_address(address, string, length);
-                       break;
-
-               default:
-                       r = -1;
-                       break;
-       }
-
-       if (r) {
-               ERROR(network->ctx, "Could not format IP address to string: %s\n", strerror(errno));
-               free(string);
-
-               return NULL;
-       }
-
-       return string;
+unsigned int loc_network_raw_prefix(struct loc_network* network) {
+       return network->prefix;
 }
 
 LOC_EXPORT const struct in6_addr* loc_network_get_first_address(struct loc_network* network) {
        return &network->first_address;
 }
 
-LOC_EXPORT char* loc_network_format_first_address(struct loc_network* network) {
-       return loc_network_format_address(network, &network->first_address);
+LOC_EXPORT const char* loc_network_format_first_address(struct loc_network* network) {
+       return loc_address_str(&network->first_address);
 }
 
 LOC_EXPORT const struct in6_addr* loc_network_get_last_address(struct loc_network* network) {
        return &network->last_address;
 }
 
-LOC_EXPORT char* loc_network_format_last_address(struct loc_network* network) {
-       return loc_network_format_address(network, &network->last_address);
+LOC_EXPORT const char* loc_network_format_last_address(struct loc_network* network) {
+       return loc_address_str(&network->last_address);
 }
 
-LOC_EXPORT int loc_network_match_address(struct loc_network* network, const struct in6_addr* address) {
+LOC_EXPORT int loc_network_matches_address(struct loc_network* network, const struct in6_addr* address) {
        // Address must be larger than the start address
-       if (in6_addr_cmp(&network->first_address, address) > 0)
+       if (loc_address_cmp(&network->first_address, address) > 0)
                return 0;
 
        // Address must be smaller than the last address
-       if (in6_addr_cmp(&network->last_address, address) < 0)
+       if (loc_address_cmp(&network->last_address, address) < 0)
                return 0;
 
        // The address is inside this network
@@ -404,11 +218,19 @@ LOC_EXPORT int loc_network_set_country_code(struct loc_network* network, const c
        return 0;
 }
 
-LOC_EXPORT int loc_network_match_country_code(struct loc_network* network, const char* country_code) {
+LOC_EXPORT int loc_network_matches_country_code(struct loc_network* network, const char* country_code) {
+       // Search for any special flags
+       const int flag = loc_country_special_code_to_flag(country_code);
+
+       // If we found a flag, we will return whether it is set or not
+       if (flag)
+               return loc_network_has_flag(network, flag);
+
        // Check country code
        if (!loc_country_code_is_valid(country_code))
                return -EINVAL;
 
+       // Check for an exact match
        return (network->country_code[0] == country_code[0])
                && (network->country_code[1] == country_code[1]);
 }
@@ -423,10 +245,6 @@ LOC_EXPORT int loc_network_set_asn(struct loc_network* network, uint32_t asn) {
        return 0;
 }
 
-LOC_EXPORT int loc_network_match_asn(struct loc_network* network, uint32_t asn) {
-       return network->asn == asn;
-}
-
 LOC_EXPORT int loc_network_has_flag(struct loc_network* network, uint32_t flag) {
        return network->flags & flag;
 }
@@ -437,13 +255,9 @@ LOC_EXPORT int loc_network_set_flag(struct loc_network* network, uint32_t flag)
        return 0;
 }
 
-LOC_EXPORT int loc_network_match_flag(struct loc_network* network, uint32_t flag) {
-       return loc_network_has_flag(network, flag);
-}
-
 LOC_EXPORT int loc_network_cmp(struct loc_network* self, struct loc_network* other) {
        // Compare address
-       int r = in6_addr_cmp(&self->first_address, &other->first_address);
+       int r = loc_address_cmp(&self->first_address, &other->first_address);
        if (r)
                return r;
 
@@ -457,19 +271,42 @@ LOC_EXPORT int loc_network_cmp(struct loc_network* self, struct loc_network* oth
        return 0;
 }
 
+int loc_network_properties_cmp(struct loc_network* self, struct loc_network* other) {
+       int r;
+
+       // Check country code
+       r = loc_country_code_cmp(self->country_code, other->country_code);
+       if (r)
+               return r;
+
+       // Check ASN
+       if (self->asn > other->asn)
+               return 1;
+       else if (self->asn < other->asn)
+               return -1;
+
+       // Check flags
+       if (self->flags > other->flags)
+               return 1;
+       else if (self->flags < other->flags)
+               return -1;
+
+       return 0;
+}
+
 LOC_EXPORT int loc_network_overlaps(struct loc_network* self, struct loc_network* other) {
        // Either of the start addresses must be in the other subnet
-       if (loc_network_match_address(self, &other->first_address))
+       if (loc_network_matches_address(self, &other->first_address))
                return 1;
 
-       if (loc_network_match_address(other, &self->first_address))
+       if (loc_network_matches_address(other, &self->first_address))
                return 1;
 
        // Or either of the end addresses is in the other subnet
-       if (loc_network_match_address(self, &other->last_address))
+       if (loc_network_matches_address(self, &other->last_address))
                return 1;
 
-       if (loc_network_match_address(other, &self->last_address))
+       if (loc_network_matches_address(other, &self->last_address))
                return 1;
 
        return 0;
@@ -482,12 +319,12 @@ LOC_EXPORT int loc_network_is_subnet(struct loc_network* self, struct loc_networ
 
        // If the start address of the other network is smaller than this network,
        // it cannot be a subnet.
-       if (in6_addr_cmp(&self->first_address, &other->first_address) > 0)
+       if (loc_address_cmp(&self->first_address, &other->first_address) > 0)
                return 0;
 
        // If the end address of the other network is greater than this network,
        // it cannot be a subnet.
-       if (in6_addr_cmp(&self->last_address, &other->last_address) < 0)
+       if (loc_address_cmp(&self->last_address, &other->last_address) < 0)
                return 0;
 
        return 1;
@@ -500,11 +337,14 @@ LOC_EXPORT int loc_network_subnets(struct loc_network* network,
        *subnet2 = NULL;
 
        // New prefix length
-       unsigned int prefix = network->prefix + 1;
+       unsigned int prefix = loc_network_prefix(network) + 1;
 
        // Check if the new prefix is valid
-       if (valid_prefix(&network->first_address, prefix))
-               return -1;
+       if (!loc_address_valid_prefix(&network->first_address, prefix)) {
+               ERROR(network->ctx, "Invalid prefix: %d\n", prefix);
+               errno = EINVAL;
+               return 1;
+       }
 
        // Create the first half of the network
        r = loc_network_new(network->ctx, subnet1, &network->first_address, prefix);
@@ -512,7 +352,8 @@ LOC_EXPORT int loc_network_subnets(struct loc_network* network,
                return r;
 
        // The next subnet starts after the first one
-       struct in6_addr first_address = address_increment(&(*subnet1)->last_address);
+       struct in6_addr first_address = (*subnet1)->last_address;
+       loc_address_increment(&first_address);
 
        // Create the second half of the network
        r = loc_network_new(network->ctx, subnet2, &first_address, prefix);
@@ -590,6 +431,9 @@ ERROR:
        if (subnet2)
                loc_network_unref(subnet2);
 
+       if (r)
+               DEBUG(network->ctx, "%s has failed with %d\n", __FUNCTION__, r);
+
        return r;
 }
 
@@ -618,15 +462,8 @@ LOC_EXPORT struct loc_network_list* loc_network_exclude(
                struct loc_network* self, struct loc_network* other) {
        struct loc_network_list* list;
 
-#ifdef ENABLE_DEBUG
-       char* n1 = loc_network_str(self);
-       char* n2 = loc_network_str(other);
-
-       DEBUG(self->ctx, "Returning %s excluding %s...\n", n1, n2);
-
-       free(n1);
-       free(n2);
-#endif
+       DEBUG(self->ctx, "Returning %s excluding %s...\n",
+               loc_network_str(self), loc_network_str(other));
 
        // Create a new list with the result
        int r = loc_network_list_new(self->ctx, &list);
@@ -745,7 +582,71 @@ LOC_EXPORT struct loc_network_list* loc_network_exclude_list(
        return subnets;
 }
 
-LOC_EXPORT int loc_network_to_database_v1(struct loc_network* network, struct loc_database_network_v1* dbobj) {
+int loc_network_merge(struct loc_network** n,
+               struct loc_network* n1, struct loc_network* n2) {
+       struct loc_network* network = NULL;
+       struct in6_addr address;
+       int r;
+
+       // Reset pointer
+       *n = NULL;
+
+       DEBUG(n1->ctx, "Attempting to merge %s and %s\n", loc_network_str(n1), loc_network_str(n2));
+
+       // Family must match
+       if (n1->family != n2->family)
+               return 0;
+
+       // The prefix must match, too
+       if (n1->prefix != n2->prefix)
+               return 0;
+
+       // Cannot merge ::/0 or 0.0.0.0/0
+       if (!n1->prefix || !n2->prefix)
+               return 0;
+
+       const unsigned int prefix = loc_network_prefix(n1);
+
+       // How many bits do we need to represent this address?
+       const size_t bitlength = loc_address_bit_length(&n1->first_address);
+
+       // We cannot shorten this any more
+       if (bitlength >= prefix) {
+               DEBUG(n1->ctx, "Cannot shorten this any further because we need at least %zu bits,"
+                       " but only have %u\n", bitlength, prefix);
+
+               return 0;
+       }
+
+       // Increment the last address of the first network
+       address = n1->last_address;
+       loc_address_increment(&address);
+
+       // If they don't match they are not neighbours
+       if (loc_address_cmp(&address, &n2->first_address) != 0)
+               return 0;
+
+       // All properties must match, too
+       if (loc_network_properties_cmp(n1, n2) != 0)
+               return 0;
+
+       // Create a new network object
+       r = loc_network_new(n1->ctx, &network, &n1->first_address, prefix - 1);
+       if (r)
+               return r;
+
+       // Copy everything else
+       loc_country_code_copy(network->country_code, n1->country_code);
+       network->asn = n1->asn;
+       network->flags = n1->flags;
+
+       // Return pointer
+       *n = network;
+
+       return 0;
+}
+
+int loc_network_to_database_v1(struct loc_network* network, struct loc_database_network_v1* dbobj) {
        // Add country code
        loc_country_code_copy(dbobj->country_code, network->country_code);
 
@@ -758,13 +659,17 @@ LOC_EXPORT int loc_network_to_database_v1(struct loc_network* network, struct lo
        return 0;
 }
 
-LOC_EXPORT int loc_network_new_from_database_v1(struct loc_ctx* ctx, struct loc_network** network,
+int loc_network_new_from_database_v1(struct loc_ctx* ctx, struct loc_network** network,
                struct in6_addr* address, unsigned int prefix, const struct loc_database_network_v1* dbobj) {
        char country_code[3] = "\0\0";
 
+       // Adjust prefix for IPv4
+       if (IN6_IS_ADDR_V4MAPPED(address))
+               prefix -= 96;
+
        int r = loc_network_new(ctx, network, address, prefix);
        if (r) {
-               ERROR(ctx, "Could not allocate a new network: %s", strerror(-r));
+               ERROR(ctx, "Could not allocate a new network: %m\n");
                return r;
        }
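To illustrate the loc_network_merge() helper added further up in this hunk: two adjacent networks with equal prefix length and identical properties collapse into their common supernet, while any mismatch simply leaves the output pointer NULL without reporting an error. A sketch under those assumptions (function name and addresses are invented):

#include <stdio.h>

#include <libloc/libloc.h>
#include <libloc/network.h>

static void merge_example(struct loc_ctx* ctx) {
        struct loc_network* a = NULL;
        struct loc_network* b = NULL;
        struct loc_network* merged = NULL;

        if (loc_network_new_from_string(ctx, &a, "192.0.2.0/25"))
                return;

        if (loc_network_new_from_string(ctx, &b, "192.0.2.128/25"))
                goto OUT;

        if (loc_network_merge(&merged, a, b) == 0 && merged)
                // Expected to print "192.0.2.0/24"
                printf("merged into %s\n", loc_network_str(merged));

OUT:
        // loc_network_unref() no longer tolerates NULL, hence the guards
        if (merged)
                loc_network_unref(merged);
        if (b)
                loc_network_unref(b);
        loc_network_unref(a);
}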
 
@@ -796,279 +701,120 @@ LOC_EXPORT int loc_network_new_from_database_v1(struct loc_ctx* ctx, struct loc_
        return 0;
 }
 
-struct loc_network_tree {
-       struct loc_ctx* ctx;
-       int refcount;
-
-       struct loc_network_tree_node* root;
-};
-
-struct loc_network_tree_node {
-       struct loc_ctx* ctx;
-       int refcount;
-
-       struct loc_network_tree_node* zero;
-       struct loc_network_tree_node* one;
-
-       struct loc_network* network;
-};
-
-int loc_network_tree_new(struct loc_ctx* ctx, struct loc_network_tree** tree) {
-       struct loc_network_tree* t = calloc(1, sizeof(*t));
-       if (!t)
-               return -ENOMEM;
-
-       t->ctx = loc_ref(ctx);
-       t->refcount = 1;
-
-       // Create the root node
-       int r = loc_network_tree_node_new(ctx, &t->root);
-       if (r) {
-               loc_network_tree_unref(t);
-               return r;
-       }
-
-       DEBUG(t->ctx, "Network tree allocated at %p\n", t);
-       *tree = t;
-       return 0;
-}
-
-struct loc_network_tree_node* loc_network_tree_get_root(struct loc_network_tree* tree) {
-       return loc_network_tree_node_ref(tree->root);
-}
-
-static struct loc_network_tree_node* loc_network_tree_get_node(struct loc_network_tree_node* node, int path) {
-       struct loc_network_tree_node** n;
-
-       if (path == 0)
-               n = &node->zero;
-       else
-               n = &node->one;
-
-       // If the desired node doesn't exist, yet, we will create it
-       if (*n == NULL) {
-               int r = loc_network_tree_node_new(node->ctx, n);
-               if (r)
-                       return NULL;
-       }
-
-       return *n;
-}
+static char* loc_network_reverse_pointer6(struct loc_network* network, const char* suffix) {
+       char* buffer = NULL;
+       int r;
 
-static struct loc_network_tree_node* loc_network_tree_get_path(struct loc_network_tree* tree, const struct in6_addr* address, unsigned int prefix) {
-       struct loc_network_tree_node* node = tree->root;
+       unsigned int prefix = loc_network_prefix(network);
 
-       for (unsigned int i = 0; i < prefix; i++) {
-               // Check if the ith bit is one or zero
-               node = loc_network_tree_get_node(node, in6_addr_get_bit(address, i));
+       // Must border on a nibble
+       // Must fall on a nibble boundary
+               errno = ENOTSUP;
+               return NULL;
        }
 
-       return node;
-}
-
-static int __loc_network_tree_walk(struct loc_ctx* ctx, struct loc_network_tree_node* node,
-               int(*filter_callback)(struct loc_network* network, void* data),
-               int(*callback)(struct loc_network* network, void* data), void* data) {
-       int r;
+       if (!suffix)
+               suffix = "ip6.arpa.";
 
-       // Finding a network ends the walk here
-       if (node->network) {
-               if (filter_callback) {
-                       int f = filter_callback(node->network, data);
-                       if (f < 0)
-                               return f;
-
-                       // Skip network if filter function returns value greater than zero
-                       if (f > 0)
-                               return 0;
-               }
-
-               r = callback(node->network, data);
-               if (r)
-                       return r;
-       }
+       // Initialize the buffer
+       r = asprintf(&buffer, "%s", suffix);
+       if (r < 0)
+               goto ERROR;
 
-       // Walk down on the left side of the tree first
-       if (node->zero) {
-               r = __loc_network_tree_walk(ctx, node->zero, filter_callback, callback, data);
-               if (r)
-                       return r;
+       for (unsigned int i = 0; i < (prefix / 4); i++) {
+               r = asprintf(&buffer, "%x.%s", loc_address_get_nibble(&network->first_address, i), buffer);
+               if (r < 0)
+                       goto ERROR;
        }
 
-       // Then walk on the other side
-       if (node->one) {
-               r = __loc_network_tree_walk(ctx, node->one, filter_callback, callback, data);
-               if (r)
-                       return r;
+       // Add the asterisk
+       if (prefix < 128) {
+               r = asprintf(&buffer, "*.%s", buffer);
+               if (r < 0)
+                       goto ERROR;
        }
 
-       return 0;
-}
+       return buffer;
 
-int loc_network_tree_walk(struct loc_network_tree* tree,
-               int(*filter_callback)(struct loc_network* network, void* data),
-               int(*callback)(struct loc_network* network, void* data), void* data) {
-       return __loc_network_tree_walk(tree->ctx, tree->root, filter_callback, callback, data);
-}
-
-static void loc_network_tree_free(struct loc_network_tree* tree) {
-       DEBUG(tree->ctx, "Releasing network tree at %p\n", tree);
-
-       loc_network_tree_node_unref(tree->root);
-
-       loc_unref(tree->ctx);
-       free(tree);
-}
-
-struct loc_network_tree* loc_network_tree_unref(struct loc_network_tree* tree) {
-       if (--tree->refcount > 0)
-               return tree;
+ERROR:
+       if (buffer)
+               free(buffer);
 
-       loc_network_tree_free(tree);
        return NULL;
 }
 
-static int __loc_network_tree_dump(struct loc_network* network, void* data) {
-       DEBUG(network->ctx, "Dumping network at %p\n", network);
-
-       char* s = loc_network_str(network);
-       if (!s)
-               return 1;
-
-       INFO(network->ctx, "%s\n", s);
-       free(s);
-
-       return 0;
-}
-
-int loc_network_tree_dump(struct loc_network_tree* tree) {
-       DEBUG(tree->ctx, "Dumping network tree at %p\n", tree);
-
-       return loc_network_tree_walk(tree, NULL, __loc_network_tree_dump, NULL);
-}
-
-int loc_network_tree_add_network(struct loc_network_tree* tree, struct loc_network* network) {
-       DEBUG(tree->ctx, "Adding network %p to tree %p\n", network, tree);
+static char* loc_network_reverse_pointer4(struct loc_network* network, const char* suffix) {
+       char* buffer = NULL;
+       int r;
 
-       struct loc_network_tree_node* node = loc_network_tree_get_path(tree,
-                       &network->first_address, network->prefix);
-       if (!node) {
-               ERROR(tree->ctx, "Could not find a node\n");
-               return -ENOMEM;
-       }
+       unsigned int prefix = loc_network_prefix(network);
 
-       // Check if node has not been set before
-       if (node->network) {
-               DEBUG(tree->ctx, "There is already a network at this path\n");
-               return -EBUSY;
+       // Must border on an octet
+       // Must fall on an octet boundary
+               errno = ENOTSUP;
+               return NULL;
        }
 
-       // Point node to the network
-       node->network = loc_network_ref(network);
-
-       return 0;
-}
-
-static int __loc_network_tree_count(struct loc_network* network, void* data) {
-       size_t* counter = (size_t*)data;
-
-       // Increase the counter for each network
-       counter++;
-
-       return 0;
-}
-
-size_t loc_network_tree_count_networks(struct loc_network_tree* tree) {
-       size_t counter = 0;
-
-       int r = loc_network_tree_walk(tree, NULL, __loc_network_tree_count, &counter);
-       if (r)
-               return r;
-
-       return counter;
-}
-
-static size_t __loc_network_tree_count_nodes(struct loc_network_tree_node* node) {
-       size_t counter = 1;
-
-       if (node->zero)
-               counter += __loc_network_tree_count_nodes(node->zero);
-
-       if (node->one)
-               counter += __loc_network_tree_count_nodes(node->one);
-
-       return counter;
-}
-
-size_t loc_network_tree_count_nodes(struct loc_network_tree* tree) {
-       return __loc_network_tree_count_nodes(tree->root);
-}
-
-int loc_network_tree_node_new(struct loc_ctx* ctx, struct loc_network_tree_node** node) {
-       struct loc_network_tree_node* n = calloc(1, sizeof(*n));
-       if (!n)
-               return -ENOMEM;
+       if (!suffix)
+               suffix = "in-addr.arpa.";
 
-       n->ctx = loc_ref(ctx);
-       n->refcount = 1;
-
-       n->zero = n->one = NULL;
-
-       DEBUG(n->ctx, "Network node allocated at %p\n", n);
-       *node = n;
-       return 0;
-}
-
-struct loc_network_tree_node* loc_network_tree_node_ref(struct loc_network_tree_node* node) {
-       if (node)
-               node->refcount++;
-
-       return node;
-}
+       switch (prefix) {
+               case 32:
+                       r = asprintf(&buffer, "%d.%d.%d.%d.%s",
+                               loc_address_get_octet(&network->first_address, 3),
+                               loc_address_get_octet(&network->first_address, 2),
+                               loc_address_get_octet(&network->first_address, 1),
+                               loc_address_get_octet(&network->first_address, 0),
+                               suffix);
+                       break;
 
-static void loc_network_tree_node_free(struct loc_network_tree_node* node) {
-       DEBUG(node->ctx, "Releasing network node at %p\n", node);
+               case 24:
+                       r = asprintf(&buffer, "*.%d.%d.%d.%s",
+                               loc_address_get_octet(&network->first_address, 2),
+                               loc_address_get_octet(&network->first_address, 1),
+                               loc_address_get_octet(&network->first_address, 0),
+                               suffix);
+                       break;
 
-       if (node->network)
-               loc_network_unref(node->network);
+               case 16:
+                       r = asprintf(&buffer, "*.%d.%d.%s",
+                               loc_address_get_octet(&network->first_address, 1),
+                               loc_address_get_octet(&network->first_address, 0),
+                               suffix);
+                       break;
 
-       if (node->zero)
-               loc_network_tree_node_unref(node->zero);
+               case 8:
+                       r = asprintf(&buffer, "*.%d.%s",
+                               loc_address_get_octet(&network->first_address, 0),
+                               suffix);
+                       break;
 
-       if (node->one)
-               loc_network_tree_node_unref(node->one);
+               case 0:
+                       r = asprintf(&buffer, "*.%s", suffix);
+                       break;
 
-       loc_unref(node->ctx);
-       free(node);
-}
+               // To make the compiler happy
+               default:
+                       return NULL;
+       }
 
-struct loc_network_tree_node* loc_network_tree_node_unref(struct loc_network_tree_node* node) {
-       if (!node)
+       if (r < 0)
                return NULL;
 
-       if (--node->refcount > 0)
-               return node;
-
-       loc_network_tree_node_free(node);
-       return NULL;
+       return buffer;
 }
 
-struct loc_network_tree_node* loc_network_tree_node_get(struct loc_network_tree_node* node, unsigned int index) {
-       if (index == 0)
-               node = node->zero;
-       else
-               node = node->one;
-
-       if (!node)
-               return NULL;
+LOC_EXPORT char* loc_network_reverse_pointer(struct loc_network* network, const char* suffix) {
+       switch (network->family) {
+               case AF_INET6:
+                       return loc_network_reverse_pointer6(network, suffix);
 
-       return loc_network_tree_node_ref(node);
-}
+               case AF_INET:
+                       return loc_network_reverse_pointer4(network, suffix);
 
-int loc_network_tree_node_is_leaf(struct loc_network_tree_node* node) {
-       return (!!node->network);
-}
+               default:
+                       break;
+       }
 
-struct loc_network* loc_network_tree_node_get_network(struct loc_network_tree_node* node) {
-       return loc_network_ref(node->network);
+       return NULL;
 }
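The new loc_network_reverse_pointer() only supports prefixes that end on an octet (IPv4) or nibble (IPv6) boundary and fails with ENOTSUP otherwise; unlike loc_network_str(), the buffer it returns comes from asprintf() and is owned by the caller. A hedged example (helper name and address are invented):

#include <stdio.h>
#include <stdlib.h>

#include <libloc/libloc.h>
#include <libloc/network.h>

/* Sketch: print the reverse DNS pointer for a network. With the default
   suffix, "192.0.2.0/24" should come out as "*.2.0.192.in-addr.arpa.". */
static void print_rdns(struct loc_ctx* ctx, const char* s) {
        struct loc_network* network = NULL;

        if (loc_network_new_from_string(ctx, &network, s))
                return;

        char* rp = loc_network_reverse_pointer(network, NULL);
        if (rp) {
                printf("%s -> %s\n", s, rp);
                free(rp);
        }

        loc_network_unref(network);
}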
index 73f85b45ce586f5136b0738f33afa61e95cd5f67..6f21f2b088f84c3e38de52006a342d2b59281ef5 100644 (file)
@@ -6,10 +6,10 @@
 #include <stdio.h>
 #include <string.h>
 
-#include <loc/libloc.h>
-#include <loc/database.h>
-#include <loc/network.h>
-#include <loc/country.h>
+#include <libloc/libloc.h>
+#include <libloc/database.h>
+#include <libloc/network.h>
+#include <libloc/country.h>
 
 MODULE = Location              PACKAGE = Location
 
index 2e111b588c1be44692a5ee41337563b1607ee930..2f4b26f65b9981c4c0bcd340dce76da83ed2cd65 100644 (file)
@@ -16,8 +16,8 @@
 
 #include <Python.h>
 
-#include <loc/libloc.h>
-#include <loc/as.h>
+#include <libloc/libloc.h>
+#include <libloc/as.h>
 
 #include "locationmodule.h"
 #include "as.h"
@@ -102,8 +102,16 @@ static int AS_set_name(ASObject* self, PyObject* value) {
        return 0;
 }
 
-static PyObject* AS_richcompare(ASObject* self, ASObject* other, int op) {
-       int r = loc_as_cmp(self->as, other->as);
+static PyObject* AS_richcompare(ASObject* self, PyObject* other, int op) {
+       int r;
+
+       // Check for type
+       if (!PyObject_IsInstance(other, (PyObject *)&ASType))
+               Py_RETURN_NOTIMPLEMENTED;
+
+       ASObject* o = (ASObject*)other;
+
+       r = loc_as_cmp(self->as, o->as);
 
        switch (op) {
                case Py_EQ:
@@ -125,6 +133,12 @@ static PyObject* AS_richcompare(ASObject* self, ASObject* other, int op) {
        Py_RETURN_NOTIMPLEMENTED;
 }
 
+static Py_hash_t AS_hash(ASObject* self) {
+       uint32_t number = loc_as_get_number(self->as);
+
+       return number;
+}
+
 static struct PyGetSetDef AS_getsetters[] = {
        {
                "name",
@@ -156,4 +170,5 @@ PyTypeObject ASType = {
        .tp_repr =               (reprfunc)AS_repr,
        .tp_str =                (reprfunc)AS_str,
        .tp_richcompare =        (richcmpfunc)AS_richcompare,
+       .tp_hash =               (hashfunc)AS_hash,
 };
index 5bc72409b793e0956f70d2522f6f27200b7f68c8..d7fe36a35ca8af23ef83c381b8edf85c5c010121 100644 (file)
@@ -19,8 +19,8 @@
 
 #include <Python.h>
 
-#include <loc/libloc.h>
-#include <loc/as.h>
+#include <libloc/libloc.h>
+#include <libloc/as.h>
 
 typedef struct {
        PyObject_HEAD
index 1247a08e1014d254bce243b2b631597020459d8e..711484658801b0dd7daa57ddc18016183e7bc12a 100644 (file)
@@ -16,8 +16,8 @@
 
 #include <Python.h>
 
-#include <loc/libloc.h>
-#include <loc/country.h>
+#include <libloc/libloc.h>
+#include <libloc/country.h>
 
 #include "locationmodule.h"
 #include "country.h"
@@ -81,6 +81,10 @@ static PyObject* Country_str(CountryObject* self) {
 static PyObject* Country_get_name(CountryObject* self) {
        const char* name = loc_country_get_name(self->country);
 
+       // Return None if no name has been set
+       if (!name)
+               Py_RETURN_NONE;
+
        return PyUnicode_FromString(name);
 }
 
@@ -99,6 +103,9 @@ static int Country_set_name(CountryObject* self, PyObject* value) {
 static PyObject* Country_get_continent_code(CountryObject* self) {
        const char* code = loc_country_get_continent_code(self->country);
 
+       if (!code)
+               Py_RETURN_NONE;
+
        return PyUnicode_FromString(code);
 }
 
@@ -114,8 +121,16 @@ static int Country_set_continent_code(CountryObject* self, PyObject* value) {
        return 0;
 }
 
-static PyObject* Country_richcompare(CountryObject* self, CountryObject* other, int op) {
-       int r = loc_country_cmp(self->country, other->country);
+static PyObject* Country_richcompare(CountryObject* self, PyObject* other, int op) {
+       int r;
+
+       // Check for type
+       if (!PyObject_IsInstance(other, (PyObject *)&CountryType))
+               Py_RETURN_NOTIMPLEMENTED;
+
+       CountryObject* o = (CountryObject*)other;
+
+       r = loc_country_cmp(self->country, o->country);
 
        switch (op) {
                case Py_EQ:
@@ -137,6 +152,22 @@ static PyObject* Country_richcompare(CountryObject* self, CountryObject* other,
        Py_RETURN_NOTIMPLEMENTED;
 }
 
+static Py_hash_t Country_hash(CountryObject* self) {
+       PyObject* code = NULL;
+       Py_hash_t hash = 0;
+
+       // Fetch the code as Python string
+       code = Country_get_code(self);
+       if (!code)
+               return -1;
+
+       // Fetch the hash of that string
+       hash = PyObject_Hash(code);
+       Py_DECREF(code);
+
+       return hash;
+}
+
 static struct PyGetSetDef Country_getsetters[] = {
        {
                "code",
@@ -175,4 +206,5 @@ PyTypeObject CountryType = {
        .tp_repr =               (reprfunc)Country_repr,
        .tp_str =                (reprfunc)Country_str,
        .tp_richcompare =        (richcmpfunc)Country_richcompare,
+       .tp_hash =               (hashfunc)Country_hash,
 };
index 3d787d917ebc50dddf6e89b61e62be9bb89ef04b..346163c7c6c356313e1ba6cbf788a2a76e326a90 100644 (file)
@@ -19,8 +19,7 @@
 
 #include <Python.h>
 
-//#include <loc/libloc.h>
-#include <loc/country.h>
+#include <libloc/country.h>
 
 typedef struct {
        PyObject_HEAD
index 0aa03cce946e71c4b4a09ff7f875a6b5f5940c62..d6ee4d02d0ed1d35fcf1ccc4244d25763e222cd3 100644 (file)
 
 #include <Python.h>
 
-#include <loc/libloc.h>
-#include <loc/as.h>
-#include <loc/as-list.h>
-#include <loc/database.h>
+#include <libloc/libloc.h>
+#include <libloc/as.h>
+#include <libloc/as-list.h>
+#include <libloc/database.h>
 
 #include "locationmodule.h"
 #include "as.h"
@@ -45,28 +45,36 @@ static void Database_dealloc(DatabaseObject* self) {
 
 static int Database_init(DatabaseObject* self, PyObject* args, PyObject* kwargs) {
        const char* path = NULL;
+       FILE* f = NULL;
 
+       // Parse arguments
        if (!PyArg_ParseTuple(args, "s", &path))
                return -1;
 
+       // Copy path
        self->path = strdup(path);
+       if (!self->path)
+               goto ERROR;
 
        // Open the file for reading
-       FILE* f = fopen(self->path, "r");
-       if (!f) {
-               PyErr_SetFromErrno(PyExc_IOError);
-               return -1;
-       }
+       f = fopen(self->path, "r");
+       if (!f)
+               goto ERROR;
 
        // Load the database
        int r = loc_database_new(loc_ctx, &self->db, f);
-       fclose(f);
-
-       // Return on any errors
        if (r)
-               return -1;
+               goto ERROR;
 
+       fclose(f);
        return 0;
+
+ERROR:
+       if (f)
+               fclose(f);
+
+       PyErr_SetFromErrno(PyExc_OSError);
+       return -1;
 }
 
 static PyObject* Database_repr(DatabaseObject* self) {
@@ -103,18 +111,24 @@ static PyObject* Database_verify(DatabaseObject* self, PyObject* args) {
 
 static PyObject* Database_get_description(DatabaseObject* self) {
        const char* description = loc_database_get_description(self->db);
+       if (!description)
+               Py_RETURN_NONE;
 
        return PyUnicode_FromString(description);
 }
 
 static PyObject* Database_get_vendor(DatabaseObject* self) {
        const char* vendor = loc_database_get_vendor(self->db);
+       if (!vendor)
+               Py_RETURN_NONE;
 
        return PyUnicode_FromString(vendor);
 }
 
 static PyObject* Database_get_license(DatabaseObject* self) {
        const char* license = loc_database_get_license(self->db);
+       if (!license)
+               Py_RETURN_NONE;
 
        return PyUnicode_FromString(license);
 }
@@ -152,17 +166,32 @@ static PyObject* Database_get_as(DatabaseObject* self, PyObject* args) {
 }
 
 static PyObject* Database_get_country(DatabaseObject* self, PyObject* args) {
+       struct loc_country* country = NULL;
        const char* country_code = NULL;
 
        if (!PyArg_ParseTuple(args, "s", &country_code))
                return NULL;
 
-       struct loc_country* country;
+       // Fetch the country
        int r = loc_database_get_country(self->db, &country, country_code);
        if (r) {
-               Py_RETURN_NONE;
+               switch (errno) {
+                       case EINVAL:
+                               PyErr_SetString(PyExc_ValueError, "Invalid country code");
+                               break;
+
+                       default:
+                               PyErr_SetFromErrno(PyExc_OSError);
+                               break;
+               }
+
+               return NULL;
        }
 
+       // No result
+       if (!country)
+               Py_RETURN_NONE;
+
        PyObject* obj = new_country(&CountryType, country);
        loc_country_unref(country);
 
@@ -185,18 +214,21 @@ static PyObject* Database_lookup(DatabaseObject* self, PyObject* args) {
                loc_network_unref(network);
 
                return obj;
+       }
 
        // Nothing found
-       } else if (r == 1) {
+       if (!errno)
                Py_RETURN_NONE;
 
-       // Invalid input
-       } else if (r == -EINVAL) {
-               PyErr_Format(PyExc_ValueError, "Invalid IP address: %s", address);
-               return NULL;
+       // Handle any errors
+       switch (errno) {
+               case EINVAL:
+                       PyErr_Format(PyExc_ValueError, "Invalid IP address: %s", address);
+                       break;
+               default:
+                       PyErr_SetFromErrno(PyExc_OSError);
        }
 
-       // Unexpected error
        return NULL;
 }
 
@@ -209,7 +241,8 @@ static PyObject* new_database_enumerator(PyTypeObject* type, struct loc_database
        return (PyObject*)self;
 }
 
-static PyObject* Database_iterate_all(DatabaseObject* self, enum loc_database_enumerator_mode what, int flags) {
+static PyObject* Database_iterate_all(DatabaseObject* self,
+               enum loc_database_enumerator_mode what, int family, int flags) {
        struct loc_database_enumerator* enumerator;
 
        int r = loc_database_enumerator_new(&enumerator, self->db, what, flags);
@@ -218,6 +251,10 @@ static PyObject* Database_iterate_all(DatabaseObject* self, enum loc_database_en
                return NULL;
        }
 
+       // Set family
+       if (family)
+               loc_database_enumerator_set_family(enumerator, family);
+
        PyObject* obj = new_database_enumerator(&DatabaseEnumeratorType, enumerator);
        loc_database_enumerator_unref(enumerator);
 
@@ -225,7 +262,7 @@ static PyObject* Database_iterate_all(DatabaseObject* self, enum loc_database_en
 }
 
 static PyObject* Database_ases(DatabaseObject* self) {
-       return Database_iterate_all(self, LOC_DB_ENUMERATE_ASES, 0);
+       return Database_iterate_all(self, LOC_DB_ENUMERATE_ASES, AF_UNSPEC, 0);
 }
 
 static PyObject* Database_search_as(DatabaseObject* self, PyObject* args) {
@@ -252,11 +289,12 @@ static PyObject* Database_search_as(DatabaseObject* self, PyObject* args) {
 }
 
 static PyObject* Database_networks(DatabaseObject* self) {
-       return Database_iterate_all(self, LOC_DB_ENUMERATE_NETWORKS, 0);
+       return Database_iterate_all(self, LOC_DB_ENUMERATE_NETWORKS, AF_UNSPEC, 0);
 }
 
 static PyObject* Database_networks_flattened(DatabaseObject *self) {
-       return Database_iterate_all(self, LOC_DB_ENUMERATE_NETWORKS, LOC_DB_ENUMERATOR_FLAGS_FLATTEN);
+       return Database_iterate_all(self, LOC_DB_ENUMERATE_NETWORKS, AF_UNSPEC,
+               LOC_DB_ENUMERATOR_FLAGS_FLATTEN);
 }
 
 static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args, PyObject* kwargs) {
@@ -341,7 +379,7 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
                struct loc_as_list* asns;
                r = loc_as_list_new(loc_ctx, &asns);
                if (r) {
-                       PyErr_SetString(PyExc_SystemError, "Could not create AS list");
+                       PyErr_SetFromErrno(PyExc_OSError);
                        return NULL;
                }
 
@@ -360,7 +398,7 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
                        struct loc_as* as;
                        r = loc_as_new(loc_ctx, &as, number);
                        if (r) {
-                               PyErr_SetString(PyExc_SystemError, "Could not create AS");
+                               PyErr_SetFromErrno(PyExc_OSError);
 
                                loc_as_list_unref(asns);
                                loc_as_unref(as);
@@ -369,7 +407,7 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
 
                        r = loc_as_list_append(asns, as);
                        if (r) {
-                               PyErr_SetString(PyExc_SystemError, "Could not append AS to the list");
+                               PyErr_SetFromErrno(PyExc_OSError);
 
                                loc_as_list_unref(asns);
                                loc_as_unref(as);
@@ -381,7 +419,7 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
 
                r = loc_database_enumerator_set_asns(enumerator, asns);
                if (r) {
-                       PyErr_SetFromErrno(PyExc_SystemError);
+                       PyErr_SetFromErrno(PyExc_OSError);
 
                        loc_as_list_unref(asns);
                        return NULL;
@@ -395,7 +433,7 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
                r = loc_database_enumerator_set_flag(enumerator, flags);
 
                if (r) {
-                       PyErr_SetFromErrno(PyExc_SystemError);
+                       PyErr_SetFromErrno(PyExc_OSError);
                        return NULL;
                }
        }
@@ -405,7 +443,7 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
                r = loc_database_enumerator_set_family(enumerator, family);
 
                if (r) {
-                       PyErr_SetFromErrno(PyExc_SystemError);
+                       PyErr_SetFromErrno(PyExc_OSError);
                        return NULL;
                }
        }
@@ -417,7 +455,18 @@ static PyObject* Database_search_networks(DatabaseObject* self, PyObject* args,
 }
 
 static PyObject* Database_countries(DatabaseObject* self) {
-       return Database_iterate_all(self, LOC_DB_ENUMERATE_COUNTRIES, 0);
+       return Database_iterate_all(self, LOC_DB_ENUMERATE_COUNTRIES, AF_UNSPEC, 0);
+}
+
+static PyObject* Database_list_bogons(DatabaseObject* self, PyObject* args, PyObject* kwargs) {
+       char* kwlist[] = { "family", NULL };
+       int family = AF_UNSPEC;
+
+       // Parse arguments
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &family))
+               return NULL;
+
+       return Database_iterate_all(self, LOC_DB_ENUMERATE_BOGONS, family, 0);
 }
 
 static struct PyMethodDef Database_methods[] = {
@@ -433,6 +482,12 @@ static struct PyMethodDef Database_methods[] = {
                METH_VARARGS,
                NULL,
        },
+       {
+               "list_bogons",
+               (PyCFunction)Database_list_bogons,
+               METH_VARARGS|METH_KEYWORDS,
+               NULL,
+       },
        {
                "lookup",
                (PyCFunction)Database_lookup,
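For reference, the new list_bogons() binding can be driven from Python roughly as in this sketch; it assumes the extension module is importable as location, the database path is only illustrative, and family is optional (both address families are enumerated when it is omitted):

    import socket
    import location

    # Open a compiled location database (path is an example only)
    db = location.Database("/var/lib/location/database.db")

    # Enumerate IPv6 bogons only; call list_bogons() without arguments for both families
    for network in db.list_bogons(family=socket.AF_INET6):
        print(network)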
index b8c766ec8931ad44303b76e678c95808b4ff0280..88b839b5ed226eeb7f248af0f872dce768bd54ff 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <Python.h>
 
-#include <loc/database.h>
+#include <libloc/database.h>
 
 typedef struct {
        PyObject_HEAD
diff --git a/src/python/database.py b/src/python/database.py
deleted file mode 100644 (file)
index 5d79941..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-
-"""
-       A lightweight wrapper around psycopg2.
-
-       Originally part of the Tornado framework.  The tornado.database module
-       is slated for removal in Tornado 3.0, and it is now available separately
-       as torndb.
-"""
-
-import logging
-import psycopg2
-
-log = logging.getLogger("location.database")
-log.propagate = 1
-
-class Connection(object):
-       """
-               A lightweight wrapper around psycopg2 DB-API connections.
-
-               The main value we provide is wrapping rows in a dict/object so that
-               columns can be accessed by name. Typical usage::
-
-                       db = torndb.Connection("localhost", "mydatabase")
-                       for article in db.query("SELECT * FROM articles"):
-                               print article.title
-
-               Cursors are hidden by the implementation, but other than that, the methods
-               are very similar to the DB-API.
-
-               We explicitly set the timezone to UTC and the character encoding to
-               UTF-8 on all connections to avoid time zone and encoding errors.
-       """
-       def __init__(self, host, database, user=None, password=None):
-               self.host = host
-               self.database = database
-
-               self._db = None
-               self._db_args = {
-                       "host"     : host,
-                       "database" : database,
-                       "user"     : user,
-                       "password" : password,
-                       "sslmode"  : "require",
-               }
-
-               try:
-                       self.reconnect()
-               except Exception:
-                       log.error("Cannot connect to database on %s", self.host, exc_info=True)
-
-       def __del__(self):
-               self.close()
-
-       def close(self):
-               """
-                       Closes this database connection.
-               """
-               if getattr(self, "_db", None) is not None:
-                       self._db.close()
-                       self._db = None
-
-       def reconnect(self):
-               """
-                       Closes the existing database connection and re-opens it.
-               """
-               self.close()
-
-               self._db = psycopg2.connect(**self._db_args)
-               self._db.autocommit = True
-
-               # Initialize the timezone setting.
-               self.execute("SET TIMEZONE TO 'UTC'")
-
-       def query(self, query, *parameters, **kwparameters):
-               """
-                       Returns a row list for the given query and parameters.
-               """
-               cursor = self._cursor()
-               try:
-                       self._execute(cursor, query, parameters, kwparameters)
-                       column_names = [d[0] for d in cursor.description]
-                       return [Row(zip(column_names, row)) for row in cursor]
-               finally:
-                       cursor.close()
-
-       def get(self, query, *parameters, **kwparameters):
-               """
-                       Returns the first row returned for the given query.
-               """
-               rows = self.query(query, *parameters, **kwparameters)
-               if not rows:
-                       return None
-               elif len(rows) > 1:
-                       raise Exception("Multiple rows returned for Database.get() query")
-               else:
-                       return rows[0]
-
-       def execute(self, query, *parameters, **kwparameters):
-               """
-                       Executes the given query, returning the lastrowid from the query.
-               """
-               return self.execute_lastrowid(query, *parameters, **kwparameters)
-
-       def execute_lastrowid(self, query, *parameters, **kwparameters):
-               """
-                       Executes the given query, returning the lastrowid from the query.
-               """
-               cursor = self._cursor()
-               try:
-                       self._execute(cursor, query, parameters, kwparameters)
-                       return cursor.lastrowid
-               finally:
-                       cursor.close()
-
-       def execute_rowcount(self, query, *parameters, **kwparameters):
-               """
-                       Executes the given query, returning the rowcount from the query.
-               """
-               cursor = self._cursor()
-               try:
-                       self._execute(cursor, query, parameters, kwparameters)
-                       return cursor.rowcount
-               finally:
-                       cursor.close()
-
-       def executemany(self, query, parameters):
-               """
-                       Executes the given query against all the given param sequences.
-
-                       We return the lastrowid from the query.
-               """
-               return self.executemany_lastrowid(query, parameters)
-
-       def executemany_lastrowid(self, query, parameters):
-               """
-                       Executes the given query against all the given param sequences.
-
-                       We return the lastrowid from the query.
-               """
-               cursor = self._cursor()
-               try:
-                       cursor.executemany(query, parameters)
-                       return cursor.lastrowid
-               finally:
-                       cursor.close()
-
-       def executemany_rowcount(self, query, parameters):
-               """
-                       Executes the given query against all the given param sequences.
-
-                       We return the rowcount from the query.
-               """
-               cursor = self._cursor()
-
-               try:
-                       cursor.executemany(query, parameters)
-                       return cursor.rowcount
-               finally:
-                       cursor.close()
-
-       def _ensure_connected(self):
-               if self._db is None:
-                       log.warning("Database connection was lost...")
-
-                       self.reconnect()
-
-       def _cursor(self):
-               self._ensure_connected()
-               return self._db.cursor()
-
-       def _execute(self, cursor, query, parameters, kwparameters):
-               log.debug("SQL Query: %s" % (query % (kwparameters or parameters)))
-
-               try:
-                       return cursor.execute(query, kwparameters or parameters)
-               except (OperationalError, psycopg2.ProgrammingError):
-                       log.error("Error connecting to database on %s", self.host)
-                       self.close()
-                       raise
-
-       def transaction(self):
-               return Transaction(self)
-
-
-class Row(dict):
-       """A dict that allows for object-like property access syntax."""
-       def __getattr__(self, name):
-               try:
-                       return self[name]
-               except KeyError:
-                       raise AttributeError(name)
-
-
-class Transaction(object):
-       def __init__(self, db):
-               self.db = db
-
-               self.db.execute("START TRANSACTION")
-
-       def __enter__(self):
-               return self
-
-       def __exit__(self, exctype, excvalue, traceback):
-               if exctype is not None:
-                       self.db.execute("ROLLBACK")
-               else:
-                       self.db.execute("COMMIT")
-
-
-# Alias some common exceptions
-IntegrityError = psycopg2.IntegrityError
-OperationalError = psycopg2.OperationalError
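For reference, the wrapper removed above was typically driven as in the following sketch; host, credentials and the queries are placeholders, rows come back as attribute-accessible Row objects, and transaction() acts as a context manager:

    from location.database import Connection

    # Placeholder connection parameters; the wrapper always requested sslmode=require
    db = Connection("db.example.org", "location", user="location", password="secret")

    # Columns of each returned Row are accessible as attributes
    for network in db.query("SELECT network, country FROM networks LIMIT %s", 10):
        print(network.network, network.country)

    # COMMIT on success, ROLLBACK if the block raises
    with db.transaction():
        db.execute("DELETE FROM networks WHERE source = %s", "EXAMPLE")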
diff --git a/src/python/importer.py b/src/python/importer.py
deleted file mode 100644 (file)
index 4c8406c..0000000
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/python3
-###############################################################################
-#                                                                             #
-# libloc - A library to determine the location of someone on the Internet     #
-#                                                                             #
-# Copyright (C) 2020 IPFire Development Team <info@ipfire.org>                #
-#                                                                             #
-# This library is free software; you can redistribute it and/or               #
-# modify it under the terms of the GNU Lesser General Public                  #
-# License as published by the Free Software Foundation; either                #
-# version 2.1 of the License, or (at your option) any later version.          #
-#                                                                             #
-# This library is distributed in the hope that it will be useful,             #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
-# Lesser General Public License for more details.                             #
-#                                                                             #
-###############################################################################
-
-import gzip
-import logging
-import urllib.request
-
-# Initialise logging
-log = logging.getLogger("location.importer")
-log.propagate = 1
-
-WHOIS_SOURCES = {
-       # African Network Information Centre
-       "AFRINIC": [
-               "https://ftp.afrinic.net/pub/pub/dbase/afrinic.db.gz"
-               ],
-
-       # Asia Pacific Network Information Centre
-       "APNIC": [
-               "https://ftp.apnic.net/apnic/whois/apnic.db.inet6num.gz",
-               "https://ftp.apnic.net/apnic/whois/apnic.db.inetnum.gz",
-               #"https://ftp.apnic.net/apnic/whois/apnic.db.route6.gz",
-               #"https://ftp.apnic.net/apnic/whois/apnic.db.route.gz",
-               "https://ftp.apnic.net/apnic/whois/apnic.db.aut-num.gz",
-               "https://ftp.apnic.net/apnic/whois/apnic.db.organisation.gz"
-               ],
-
-       # American Registry for Internet Numbers
-       # XXX there is nothing useful for us in here
-       # ARIN: [
-       #       "https://ftp.arin.net/pub/rr/arin.db"
-       # ],
-
-       # Latin America and Caribbean Network Information Centre
-       # XXX ???
-
-       # Réseaux IP Européens
-       "RIPE": [
-               "https://ftp.ripe.net/ripe/dbase/split/ripe.db.inet6num.gz",
-               "https://ftp.ripe.net/ripe/dbase/split/ripe.db.inetnum.gz",
-               #"https://ftp.ripe.net/ripe/dbase/split/ripe.db.route6.gz",
-               #"https://ftp.ripe.net/ripe/dbase/split/ripe.db.route.gz",
-               "https://ftp.ripe.net/ripe/dbase/split/ripe.db.aut-num.gz",
-               "https://ftp.ripe.net/ripe/dbase/split/ripe.db.organisation.gz"
-               ],
-}
-
-EXTENDED_SOURCES = {
-       # African Network Information Centre
-       # "ARIN": [
-       #       "https://ftp.afrinic.net/pub/stats/afrinic/delegated-afrinic-extended-latest"
-       # ],
-
-       # Asia Pacific Network Information Centre
-       # "APNIC": [
-       #       "https://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-extended-latest"
-       # ],
-
-       # American Registry for Internet Numbers
-       "ARIN": [
-               "https://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest"
-               ],
-
-       # Latin America and Caribbean Network Information Centre
-       "LACNIC": [
-               "https://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-extended-latest"
-               ],
-
-       # Réseaux IP Européens
-       # "RIPE": [
-       #       "https://ftp.ripe.net/pub/stats/ripencc/delegated-ripencc-extended-latest"
-       # ],
-}
-
-class Downloader(object):
-       def __init__(self):
-               self.proxy = None
-
-       def set_proxy(self, url):
-               """
-                       Sets an HTTP proxy that is used to perform all requests
-               """
-               log.info("Using proxy %s" % url)
-               self.proxy = url
-
-       def request(self, url, data=None, return_blocks=False):
-               req = urllib.request.Request(url, data=data)
-
-               # Configure proxy
-               if self.proxy:
-                       req.set_proxy(self.proxy, "http")
-
-               return DownloaderContext(self, req, return_blocks=return_blocks)
-
-
-class DownloaderContext(object):
-       def __init__(self, downloader, request, return_blocks=False):
-               self.downloader = downloader
-               self.request = request
-
-               # Should we return one block or a single line?
-               self.return_blocks = return_blocks
-
-               # Save the response object
-               self.response = None
-
-       def __enter__(self):
-               log.info("Retrieving %s..." % self.request.full_url)
-
-               # Send request
-               self.response = urllib.request.urlopen(self.request)
-
-               # Log the response headers
-               log.debug("Response Headers:")
-               for header in self.headers:
-                       log.debug("     %s: %s" % (header, self.get_header(header)))
-
-               return self
-
-       def __exit__(self, type, value, traceback):
-               pass
-
-       def __iter__(self):
-               """
-                       Makes the object iterable by going through each block
-               """
-               if self.return_blocks:
-                       return iterate_over_blocks(self.body)
-
-               return iterate_over_lines(self.body)
-
-       @property
-       def headers(self):
-               if self.response:
-                       return self.response.headers
-
-       def get_header(self, name):
-               if self.headers:
-                       return self.headers.get(name)
-
-       @property
-       def body(self):
-               """
-                       Returns a file-like object with the decoded content
-                       of the response.
-               """
-               content_type = self.get_header("Content-Type")
-
-               # Decompress any gzipped response on the fly
-               if content_type in ("application/x-gzip", "application/gzip"):
-                       return gzip.GzipFile(fileobj=self.response, mode="rb")
-
-               # Return the response by default
-               return self.response
-
-
-def read_blocks(f):
-       for block in iterate_over_blocks(f):
-               type = None
-               data = {}
-
-               for i, line in enumerate(block):
-                       key, value = line.split(":", 1)
-
-                       # The key of the first line defines the type
-                       if i == 0:
-                               type = key
-
-                       # Store value
-                       data[key] = value.strip()
-
-               yield type, data
-
-def iterate_over_blocks(f, charsets=("utf-8", "latin1")):
-       block = []
-
-       for line in f:
-               # Convert to string
-               for charset in charsets:
-                       try:
-                               line = line.decode(charset)
-                       except UnicodeDecodeError:
-                               continue
-                       else:
-                               break
-
-               # Skip commented lines
-               if line.startswith("#") or line.startswith("%"):
-                       continue
-
-               # Strip line-endings
-               line = line.rstrip()
-
-               # Remove any comments at the end of line
-               line, hash, comment = line.partition("#")
-
-               if comment:
-                       # Strip any whitespace before the comment
-                       line = line.rstrip()
-
-                       # If the line is now empty, we move on
-                       if not line:
-                               continue
-
-               if line:
-                       block.append(line)
-                       continue
-
-               # End the block on an empty line
-               if block:
-                       yield block
-
-               # Reset the block
-               block = []
-
-       # Return the last block
-       if block:
-               yield block
-
-
-def iterate_over_lines(f):
-       for line in f:
-               # Decode the line
-               line = line.decode()
-
-               # Strip the ending
-               yield line.rstrip()
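The removed Downloader was driven by the importer roughly as follows (a sketch; the proxy URL is a placeholder and optional):

    from location.importer import Downloader, WHOIS_SOURCES

    downloader = Downloader()
    #downloader.set_proxy("http://proxy.example.org:3128")

    # Fetch one RIPE split file and walk it block by block;
    # gzipped responses are decompressed on the fly
    url = WHOIS_SOURCES["RIPE"][0]
    with downloader.request(url, return_blocks=True) as f:
        for block in f:
            # Each block is a list of "key: value" lines with
            # comments and blank lines already stripped
            print(block[0])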
diff --git a/src/python/location-importer.in b/src/python/location-importer.in
deleted file mode 100644 (file)
index ff90d91..0000000
+++ /dev/null
@@ -1,1282 +0,0 @@
-#!/usr/bin/python3
-###############################################################################
-#                                                                             #
-# libloc - A library to determine the location of someone on the Internet     #
-#                                                                             #
-# Copyright (C) 2020-2021 IPFire Development Team <info@ipfire.org>           #
-#                                                                             #
-# This library is free software; you can redistribute it and/or               #
-# modify it under the terms of the GNU Lesser General Public                  #
-# License as published by the Free Software Foundation; either                #
-# version 2.1 of the License, or (at your option) any later version.          #
-#                                                                             #
-# This library is distributed in the hope that it will be useful,             #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
-# Lesser General Public License for more details.                             #
-#                                                                             #
-###############################################################################
-
-import argparse
-import ipaddress
-import json
-import logging
-import math
-import re
-import socket
-import sys
-import telnetlib
-
-# Load our location module
-import location
-import location.database
-import location.importer
-from location.i18n import _
-
-# Initialise logging
-log = logging.getLogger("location.importer")
-log.propagate = 1
-
-class CLI(object):
-       def parse_cli(self):
-               parser = argparse.ArgumentParser(
-                       description=_("Location Importer Command Line Interface"),
-               )
-               subparsers = parser.add_subparsers()
-
-               # Global configuration flags
-               parser.add_argument("--debug", action="store_true",
-                       help=_("Enable debug output"))
-               parser.add_argument("--quiet", action="store_true",
-                       help=_("Enable quiet mode"))
-
-               # version
-               parser.add_argument("--version", action="version",
-                       version="%(prog)s @VERSION@")
-
-               # Database
-               parser.add_argument("--database-host", required=True,
-                       help=_("Database Hostname"), metavar=_("HOST"))
-               parser.add_argument("--database-name", required=True,
-                       help=_("Database Name"), metavar=_("NAME"))
-               parser.add_argument("--database-username", required=True,
-                       help=_("Database Username"), metavar=_("USERNAME"))
-               parser.add_argument("--database-password", required=True,
-                       help=_("Database Password"), metavar=_("PASSWORD"))
-
-               # Write Database
-               write = subparsers.add_parser("write", help=_("Write database to file"))
-               write.set_defaults(func=self.handle_write)
-               write.add_argument("file", nargs=1, help=_("Database File"))
-               write.add_argument("--signing-key", nargs="?", type=open, help=_("Signing Key"))
-               write.add_argument("--backup-signing-key", nargs="?", type=open, help=_("Backup Signing Key"))
-               write.add_argument("--vendor", nargs="?", help=_("Sets the vendor"))
-               write.add_argument("--description", nargs="?", help=_("Sets a description"))
-               write.add_argument("--license", nargs="?", help=_("Sets the license"))
-               write.add_argument("--version", type=int, help=_("Database Format Version"))
-
-               # Update WHOIS
-               update_whois = subparsers.add_parser("update-whois", help=_("Update WHOIS Information"))
-               update_whois.set_defaults(func=self.handle_update_whois)
-
-               # Update announcements
-               update_announcements = subparsers.add_parser("update-announcements",
-                       help=_("Update BGP Announcements"))
-               update_announcements.set_defaults(func=self.handle_update_announcements)
-               update_announcements.add_argument("server", nargs=1,
-                       help=_("Route Server to connect to"), metavar=_("SERVER"))
-
-               # Update overrides
-               update_overrides = subparsers.add_parser("update-overrides",
-                       help=_("Update overrides"),
-               )
-               update_overrides.add_argument(
-                       "files", nargs="+", help=_("Files to import"),
-               )
-               update_overrides.set_defaults(func=self.handle_update_overrides)
-
-               # Import countries
-               import_countries = subparsers.add_parser("import-countries",
-                       help=_("Import countries"),
-               )
-               import_countries.add_argument("file", nargs=1, type=argparse.FileType("r"),
-                       help=_("File to import"))
-               import_countries.set_defaults(func=self.handle_import_countries)
-
-               args = parser.parse_args()
-
-               # Configure logging
-               if args.debug:
-                       location.logger.set_level(logging.DEBUG)
-               elif args.quiet:
-                       location.logger.set_level(logging.WARNING)
-
-               # Print usage if no action was given
-               if not "func" in args:
-                       parser.print_usage()
-                       sys.exit(2)
-
-               return args
-
-       def run(self):
-               # Parse command line arguments
-               args = self.parse_cli()
-
-               # Initialise database
-               self.db = self._setup_database(args)
-
-               # Call function
-               ret = args.func(args)
-
-               # Return with exit code
-               if ret:
-                       sys.exit(ret)
-
-               # Otherwise just exit
-               sys.exit(0)
-
-       def _setup_database(self, ns):
-               """
-                       Initialise the database
-               """
-               # Connect to database
-               db = location.database.Connection(
-                       host=ns.database_host, database=ns.database_name,
-                       user=ns.database_username, password=ns.database_password,
-               )
-
-               with db.transaction():
-                       db.execute("""
-                               -- announcements
-                               CREATE TABLE IF NOT EXISTS announcements(network inet, autnum bigint,
-                                       first_seen_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
-                                       last_seen_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP);
-                               CREATE UNIQUE INDEX IF NOT EXISTS announcements_networks ON announcements(network);
-                               CREATE INDEX IF NOT EXISTS announcements_family ON announcements(family(network));
-                               CREATE INDEX IF NOT EXISTS announcements_search ON announcements USING GIST(network inet_ops);
-
-                               -- autnums
-                               CREATE TABLE IF NOT EXISTS autnums(number bigint, name text NOT NULL);
-                               ALTER TABLE autnums ADD COLUMN IF NOT EXISTS source text NOT NULL;
-                               CREATE UNIQUE INDEX IF NOT EXISTS autnums_number ON autnums(number);
-
-                               -- countries
-                               CREATE TABLE IF NOT EXISTS countries(
-                                       country_code text NOT NULL, name text NOT NULL, continent_code text NOT NULL);
-                               CREATE UNIQUE INDEX IF NOT EXISTS countries_country_code ON countries(country_code);
-
-                               -- networks
-                               CREATE TABLE IF NOT EXISTS networks(network inet, country text);
-                               ALTER TABLE networks ADD COLUMN IF NOT EXISTS original_countries text[];
-                               ALTER TABLE networks ADD COLUMN IF NOT EXISTS source text NOT NULL;
-                               CREATE UNIQUE INDEX IF NOT EXISTS networks_network ON networks(network);
-                               CREATE INDEX IF NOT EXISTS networks_family ON networks USING BTREE(family(network));
-                               CREATE INDEX IF NOT EXISTS networks_search ON networks USING GIST(network inet_ops);
-
-                               -- overrides
-                               CREATE TABLE IF NOT EXISTS autnum_overrides(
-                                       number bigint NOT NULL,
-                                       name text,
-                                       country text,
-                                       is_anonymous_proxy boolean,
-                                       is_satellite_provider boolean,
-                                       is_anycast boolean
-                               );
-                               CREATE UNIQUE INDEX IF NOT EXISTS autnum_overrides_number
-                                       ON autnum_overrides(number);
-                               ALTER TABLE autnum_overrides ADD COLUMN IF NOT EXISTS source text;
-                               ALTER TABLE autnum_overrides ADD COLUMN IF NOT EXISTS is_drop boolean;
-
-                               CREATE TABLE IF NOT EXISTS network_overrides(
-                                       network inet NOT NULL,
-                                       country text,
-                                       is_anonymous_proxy boolean,
-                                       is_satellite_provider boolean,
-                                       is_anycast boolean
-                               );
-                               CREATE UNIQUE INDEX IF NOT EXISTS network_overrides_network
-                                       ON network_overrides(network);
-                               CREATE INDEX IF NOT EXISTS network_overrides_search
-                                       ON network_overrides USING GIST(network inet_ops);
-                               ALTER TABLE network_overrides ADD COLUMN IF NOT EXISTS source text;
-                               ALTER TABLE network_overrides ADD COLUMN IF NOT EXISTS is_drop boolean;
-                       """)
-
-               return db
-
-       def handle_write(self, ns):
-               """
-                       Compiles a database in libloc format out of what is in the database
-               """
-               # Allocate a writer
-               writer = location.Writer(ns.signing_key, ns.backup_signing_key)
-
-               # Set all metadata
-               if ns.vendor:
-                       writer.vendor = ns.vendor
-
-               if ns.description:
-                       writer.description = ns.description
-
-               if ns.license:
-                       writer.license = ns.license
-
-               # Add all Autonomous Systems
-               log.info("Writing Autonomous Systems...")
-
-               # Select all ASes with a name
-               rows = self.db.query("""
-                       SELECT
-                               autnums.number AS number,
-                               COALESCE(
-                                       (SELECT overrides.name FROM autnum_overrides overrides
-                                               WHERE overrides.number = autnums.number),
-                                       autnums.name
-                               ) AS name
-                               FROM autnums
-                               WHERE name <> %s ORDER BY number
-                       """, "")
-
-               for row in rows:
-                       a = writer.add_as(row.number)
-                       a.name = row.name
-
-               # Add all networks
-               log.info("Writing networks...")
-
-               # Select all known networks
-               rows = self.db.query("""
-                       -- Return a list of those networks enriched with all
-                       -- other information that we store in the database
-                       SELECT
-                               DISTINCT ON (network)
-                               network,
-                               autnum,
-
-                               -- Country
-                               COALESCE(
-                                       (
-                                               SELECT country FROM network_overrides overrides
-                                                       WHERE networks.network <<= overrides.network
-                                                       ORDER BY masklen(overrides.network) DESC
-                                                       LIMIT 1
-                                       ),
-                                       (
-                                               SELECT country FROM autnum_overrides overrides
-                                                       WHERE networks.autnum = overrides.number
-                                       ),
-                                       networks.country
-                               ) AS country,
-
-                               -- Flags
-                               COALESCE(
-                                       (
-                                               SELECT is_anonymous_proxy FROM network_overrides overrides
-                                                       WHERE networks.network <<= overrides.network
-                                                       ORDER BY masklen(overrides.network) DESC
-                                                       LIMIT 1
-                                       ),
-                                       (
-                                               SELECT is_anonymous_proxy FROM autnum_overrides overrides
-                                                       WHERE networks.autnum = overrides.number
-                                       ),
-                                       FALSE
-                               ) AS is_anonymous_proxy,
-                               COALESCE(
-                                       (
-                                               SELECT is_satellite_provider FROM network_overrides overrides
-                                                       WHERE networks.network <<= overrides.network
-                                                       ORDER BY masklen(overrides.network) DESC
-                                                       LIMIT 1
-                                       ),
-                                       (
-                                               SELECT is_satellite_provider FROM autnum_overrides overrides
-                                                       WHERE networks.autnum = overrides.number
-                                       ),
-                                       FALSE
-                               ) AS is_satellite_provider,
-                               COALESCE(
-                                       (
-                                               SELECT is_anycast FROM network_overrides overrides
-                                                       WHERE networks.network <<= overrides.network
-                                                       ORDER BY masklen(overrides.network) DESC
-                                                       LIMIT 1
-                                       ),
-                                       (
-                                               SELECT is_anycast FROM autnum_overrides overrides
-                                                       WHERE networks.autnum = overrides.number
-                                       ),
-                                       FALSE
-                               ) AS is_anycast,
-                               COALESCE(
-                                       (
-                                               SELECT is_drop FROM network_overrides overrides
-                                                       WHERE networks.network <<= overrides.network
-                                                       ORDER BY masklen(overrides.network) DESC
-                                                       LIMIT 1
-                                       ),
-                                       (
-                                               SELECT is_drop FROM autnum_overrides overrides
-                                                       WHERE networks.autnum = overrides.number
-                                       ),
-                                       FALSE
-                               ) AS is_drop
-                       FROM (
-                               SELECT
-                                       known_networks.network AS network,
-                                       announcements.autnum AS autnum,
-                                       networks.country AS country,
-
-                                       -- Must be part of returned values for ORDER BY clause
-                                       masklen(announcements.network) AS sort_a,
-                                       masklen(networks.network) AS sort_b
-                               FROM (
-                                               SELECT network FROM announcements
-                                       UNION ALL
-                                               SELECT network FROM networks
-                                       UNION ALL
-                                               SELECT network FROM network_overrides
-                                       ) known_networks
-                               LEFT JOIN
-                                       announcements ON known_networks.network <<= announcements.network
-                               LEFT JOIN
-                                       networks ON known_networks.network <<= networks.network
-                               ORDER BY
-                                       known_networks.network,
-                                       sort_a DESC,
-                                       sort_b DESC
-                       ) networks
-               """)
-
-               for row in rows:
-                       network = writer.add_network(row.network)
-
-                       # Save country
-                       if row.country:
-                               network.country_code = row.country
-
-                       # Save ASN
-                       if row.autnum:
-                               network.asn = row.autnum
-
-                       # Set flags
-                       if row.is_anonymous_proxy:
-                               network.set_flag(location.NETWORK_FLAG_ANONYMOUS_PROXY)
-
-                       if row.is_satellite_provider:
-                               network.set_flag(location.NETWORK_FLAG_SATELLITE_PROVIDER)
-
-                       if row.is_anycast:
-                               network.set_flag(location.NETWORK_FLAG_ANYCAST)
-
-                       if row.is_drop:
-                               network.set_flag(location.NETWORK_FLAG_DROP)
-
-               # Add all countries
-               log.info("Writing countries...")
-               rows = self.db.query("SELECT * FROM countries ORDER BY country_code")
-
-               for row in rows:
-                       c = writer.add_country(row.country_code)
-                       c.continent_code = row.continent_code
-                       c.name = row.name
-
-               # Write everything to file
-               log.info("Writing database to file...")
-               for file in ns.file:
-                       writer.write(file)
-
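handle_write() above is built on the location.Writer API; a condensed sketch of that API with placeholder values and no signing keys:

    import location

    writer = location.Writer(None, None)     # no signing keys, as when --signing-key is omitted
    writer.vendor = "Example"
    writer.description = "Example database"

    a = writer.add_as(64512)                 # private-use ASN as a placeholder
    a.name = "EXAMPLE-AS"

    n = writer.add_network("2001:db8::/32")  # documentation prefix as a placeholder
    n.country_code = "DE"
    n.asn = 64512
    n.set_flag(location.NETWORK_FLAG_ANYCAST)

    writer.write("example.db")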
-       def handle_update_whois(self, ns):
-               downloader = location.importer.Downloader()
-
-               # Download all sources
-               with self.db.transaction():
-                       # Create some temporary tables to store parsed data
-                       self.db.execute("""
-                               CREATE TEMPORARY TABLE _autnums(number integer, organization text, source text NOT NULL)
-                                       ON COMMIT DROP;
-                               CREATE UNIQUE INDEX _autnums_number ON _autnums(number);
-
-                               CREATE TEMPORARY TABLE _organizations(handle text, name text NOT NULL, source text NOT NULL)
-                                       ON COMMIT DROP;
-                               CREATE UNIQUE INDEX _organizations_handle ON _organizations(handle);
-
-                               CREATE TEMPORARY TABLE _rirdata(network inet NOT NULL, country text NOT NULL, original_countries text[] NOT NULL, source text NOT NULL)
-                                       ON COMMIT DROP;
-                               CREATE INDEX _rirdata_search ON _rirdata USING BTREE(family(network), masklen(network));
-                               CREATE UNIQUE INDEX _rirdata_network ON _rirdata(network);
-                       """)
-
-                       # Remove all previously imported content
-                       self.db.execute("""
-                               TRUNCATE TABLE networks;
-                       """)
-
-                       # Fetch all valid country codes to check parsed networks against...
-                       rows = self.db.query("SELECT * FROM countries ORDER BY country_code")
-                       validcountries = []
-
-                       for row in rows:
-                               validcountries.append(row.country_code)
-
-                       for source_key in location.importer.WHOIS_SOURCES:
-                               for single_url in location.importer.WHOIS_SOURCES[source_key]:
-                                       with downloader.request(single_url, return_blocks=True) as f:
-                                               for block in f:
-                                                       self._parse_block(block, source_key, validcountries)
-
-                       # Process all parsed networks from every RIR we happen to have access to,
-                       # insert the largest network chunks into the networks table immediately...
-                       families = self.db.query("SELECT DISTINCT family(network) AS family FROM _rirdata ORDER BY family(network)")
-
-                       for family in (row.family for row in families):
-                               smallest = self.db.get("SELECT MIN(masklen(network)) AS prefix FROM _rirdata WHERE family(network) = %s", family)
-
-                               self.db.execute("INSERT INTO networks(network, country, original_countries, source) \
-                                       SELECT network, country, original_countries, source FROM _rirdata WHERE masklen(network) = %s AND family(network) = %s", smallest.prefix, family)
-
-                               # ... determine any other prefixes for this network family, ...
-                               prefixes = self.db.query("SELECT DISTINCT masklen(network) AS prefix FROM _rirdata \
-                                       WHERE family(network) = %s ORDER BY masklen(network) ASC OFFSET 1", family)
-
-                               # ... and insert networks with this prefix in case they provide additional
-                               # information (i.e. a subnet of a larger chunk with a different country)
-                               for prefix in (row.prefix for row in prefixes):
-                                       self.db.execute("""
-                                               WITH candidates AS (
-                                                       SELECT
-                                                               _rirdata.network,
-                                                               _rirdata.country,
-                                                               _rirdata.original_countries,
-                                                               _rirdata.source
-                                                       FROM
-                                                               _rirdata
-                                                       WHERE
-                                                               family(_rirdata.network) = %s
-                                                       AND
-                                                               masklen(_rirdata.network) = %s
-                                               ),
-                                               filtered AS (
-                                                       SELECT
-                                                               DISTINCT ON (c.network)
-                                                               c.network,
-                                                               c.country,
-                                                               c.original_countries,
-                                                               c.source,
-                                                               masklen(networks.network),
-                                                               networks.country AS parent_country
-                                                       FROM
-                                                               candidates c
-                                                       LEFT JOIN
-                                                               networks
-                                                       ON
-                                                               c.network << networks.network
-                                                       ORDER BY
-                                                               c.network,
-                                                               masklen(networks.network) DESC NULLS LAST
-                                               )
-                                               INSERT INTO
-                                                       networks(network, country, original_countries, source)
-                                               SELECT
-                                                       network,
-                                                       country,
-                                                       original_countries,
-                                                       source
-                                               FROM
-                                                       filtered
-                                               WHERE
-                                                       parent_country IS NULL
-                                               OR
-                                                       country <> parent_country
-                                               ON CONFLICT DO NOTHING""",
-                                               family, prefix,
-                                       )
-
-                       self.db.execute("""
-                               INSERT INTO autnums(number, name, source)
-                                       SELECT _autnums.number, _organizations.name, _organizations.source FROM _autnums
-                                               JOIN _organizations ON _autnums.organization = _organizations.handle
-                               ON CONFLICT (number) DO UPDATE SET name = excluded.name;
-                       """)
-
-               # Download all extended sources
-               for source_key in location.importer.EXTENDED_SOURCES:
-                       for single_url in location.importer.EXTENDED_SOURCES[source_key]:
-                               with self.db.transaction():
-                                       # Download data
-                                       with downloader.request(single_url) as f:
-                                               for line in f:
-                                                       self._parse_line(line, source_key, validcountries)
-
-               # Download and import (technical) AS names from ARIN
-               self._import_as_names_from_arin()
-
-       def _check_parsed_network(self, network):
-               """
-                       Assistive function to detect and subsequently sort out parsed
-                       networks from RIR data (both Whois and so-called "extended sources"),
-                       which are or have...
-
-                       (a) not globally routable (RFC 1918 space, et al.)
-                       (b) covering a too large chunk of the IP address space (prefix length
-                               is < 7 for IPv4 networks, and < 10 for IPv6)
-                       (c) "0.0.0.0" or "::" as a network address
-                       (d) are too small for being publicly announced (we have decided not to
-                               process them at the moment, as they significantly enlarge our
-                               database without providing very helpful additional information)
-
-                       This unfortunately is necessary due to brain-dead clutter across
-                       various RIR databases, causing mismatches and eventually disruptions.
-
-                       We will return False in case a network is not suitable for adding
-                       it to our database, and True otherwise.
-               """
-
-               if not network or not (isinstance(network, ipaddress.IPv4Network) or isinstance(network, ipaddress.IPv6Network)):
-                       return False
-
-               if not network.is_global:
-                       log.debug("Skipping non-globally routable network: %s" % network)
-                       return False
-
-               if network.version == 4:
-                       if network.prefixlen < 7:
-                               log.debug("Skipping too big IP chunk: %s" % network)
-                               return False
-
-                       if network.prefixlen > 24:
-                               log.debug("Skipping network too small to be publicly announced: %s" % network)
-                               return False
-
-                       if str(network.network_address) == "0.0.0.0":
-                               log.debug("Skipping network based on 0.0.0.0: %s" % network)
-                               return False
-
-               elif network.version == 6:
-                       if network.prefixlen < 10:
-                               log.debug("Skipping too big IP chunk: %s" % network)
-                               return False
-
-                       if network.prefixlen > 48:
-                               log.debug("Skipping network too small to be publicly announced: %s" % network)
-                               return False
-
-                       if str(network.network_address) == "::":
-                               log.debug("Skipping network based on '::': %s" % network)
-                               return False
-
-               else:
-                       # This should not happen...
-                       log.warning("Skipping network of unknown family, this should not happen: %s" % network)
-                       return False
-
-               # In case we have made it here, the network is considered to
-               # be suitable for libloc consumption...
-               return True
-
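To illustrate the filter above: a few sample networks and the verdicts they receive (the method uses no instance state, so it can be exercised directly when running inside this script):

    import ipaddress

    for sample in ("10.0.0.0/8", "2001:db8::/32", "8.8.8.0/28", "1.0.0.0/24"):
        print(sample, CLI._check_parsed_network(None, ipaddress.ip_network(sample)))

    # 10.0.0.0/8    -> False (RFC 1918 space, not globally routable)
    # 2001:db8::/32 -> False (documentation space, not globally routable)
    # 8.8.8.0/28    -> False (IPv4 prefix longer than /24, too small to be announced)
    # 1.0.0.0/24    -> True  (globally routable and within the accepted prefix bounds)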
-       def _parse_block(self, block, source_key, validcountries = None):
-               # Get first line to find out what type of block this is
-               line = block[0]
-
-               # aut-num
-               if line.startswith("aut-num:"):
-                       return self._parse_autnum_block(block, source_key)
-
-               # inetnum
-               if line.startswith("inet6num:") or line.startswith("inetnum:"):
-                       return self._parse_inetnum_block(block, source_key, validcountries)
-
-               # organisation
-               elif line.startswith("organisation:"):
-                       return self._parse_org_block(block, source_key)
-
-       def _parse_autnum_block(self, block, source_key):
-               autnum = {}
-               for line in block:
-                       # Split line
-                       key, val = split_line(line)
-
-                       if key == "aut-num":
-                               m = re.match(r"^(AS|as)(\d+)", val)
-                               if m:
-                                       autnum["asn"] = m.group(2)
-
-                       elif key == "org":
-                               autnum[key] = val.upper()
-
-               # Skip empty objects
-               if not autnum:
-                       return
-
-               # Insert into database
-               self.db.execute("INSERT INTO _autnums(number, organization, source) \
-                       VALUES(%s, %s, %s) ON CONFLICT (number) DO UPDATE SET \
-                               organization = excluded.organization",
-                       autnum.get("asn"), autnum.get("org"), source_key,
-               )
-
-       def _parse_inetnum_block(self, block, source_key, validcountries = None):
-               log.debug("Parsing inetnum block:")
-
-               inetnum = {}
-               for line in block:
-                       log.debug(line)
-
-                       # Split line
-                       key, val = split_line(line)
-
-                       # Filter any inetnum records which are only referring to IP space
-                       # not managed by that specific RIR...
-                       if key == "netname":
-                               if re.match(r"(ERX-NETBLOCK|(AFRINIC|ARIN|LACNIC|RIPE)-CIDR-BLOCK|IANA-NETBLOCK-\d{1,3}|NON-RIPE-NCC-MANAGED-ADDRESS-BLOCK)", val.strip()):
-                                       log.debug("Skipping record indicating historic/orphaned data: %s" % val.strip())
-                                       return
-
-                       if key == "inetnum":
-                               start_address, delim, end_address = val.partition("-")
-
-                               # Strip any excess space
-                               start_address, end_address = start_address.rstrip(), end_address.strip()
-
-                               # Convert to IP address
-                               try:
-                                       start_address = ipaddress.ip_address(start_address)
-                                       end_address   = ipaddress.ip_address(end_address)
-                               except ValueError:
-                                       log.warning("Could not parse line: %s" % line)
-                                       return
-
-                               inetnum["inetnum"] = list(ipaddress.summarize_address_range(start_address, end_address))
-
-                       elif key == "inet6num":
-                               inetnum[key] = [ipaddress.ip_network(val, strict=False)]
-
-                       elif key == "country":
-                               val = val.upper()
-
-                               # Catch RIR data objects with more than one country code...
-                               if not key in inetnum:
-                                       inetnum[key] = []
-                               else:
-                                       if val in inetnum.get("country"):
-                                               # ... but keep this list distinct...
-                                               continue
-
-                               inetnum[key].append(val)
-
-               # Skip empty objects
-               if not inetnum or not "country" in inetnum:
-                       return
-
-               # Prepare skipping objects with unknown country codes...
-               invalidcountries = [singlecountry for singlecountry in inetnum.get("country") if singlecountry not in validcountries]
-
-               # Iterate through all networks enumerated from above, check them for plausibility and insert
-               # them into the database, if _check_parsed_network() succeeded
-               for single_network in inetnum.get("inet6num") or inetnum.get("inetnum"):
-                       if self._check_parsed_network(single_network):
-
-                               # Skip objects with unknown country codes if they are valid to avoid log spam...
-                               if validcountries and invalidcountries:
-                                       log.warning("Skipping network with bogus countr(y|ies) %s (original countries: %s): %s" % \
-                                               (invalidcountries, inetnum.get("country"), inetnum.get("inet6num") or inetnum.get("inetnum")))
-
-                               # Everything is fine here, run INSERT statement...
-                               self.db.execute("INSERT INTO _rirdata(network, country, original_countries, source) \
-                                       VALUES(%s, %s, %s, %s) ON CONFLICT (network) DO UPDATE SET country = excluded.country",
-                                       "%s" % single_network, inetnum.get("country")[0], inetnum.get("country"), source_key,
-                               )
-
-       def _parse_org_block(self, block, source_key):
-               org = {}
-               for line in block:
-                       # Split line
-                       key, val = split_line(line)
-
-                       if key == "organisation":
-                               org[key] = val.upper()
-                       elif key == "org-name":
-                               org[key] = val
-
-               # Skip empty objects
-               if not org:
-                       return
-
-               self.db.execute("INSERT INTO _organizations(handle, name, source) \
-                       VALUES(%s, %s, %s) ON CONFLICT (handle) DO \
-                       UPDATE SET name = excluded.name",
-                       org.get("organisation"), org.get("org-name"), source_key,
-               )
-
-       def _parse_line(self, line, source_key, validcountries = None):
-               # Skip version line
-               if line.startswith("2"):
-                       return
-
-               # Skip comments
-               if line.startswith("#"):
-                       return
-
-               try:
-                       registry, country_code, type, line = line.split("|", 3)
-               except:
-                       log.warning("Could not parse line: %s" % line)
-                       return
-
-               # Skip any lines that are for stats only or do not have a country
-               # code at all (avoids log spam below)
-               if not country_code or country_code == '*':
-                       return
-
-               # Skip objects with unknown country codes
-               if validcountries and country_code not in validcountries:
-                       log.warning("Skipping line with bogus country '%s': %s" % \
-                               (country_code, line))
-                       return
-
-               if type in ("ipv6", "ipv4"):
-                       return self._parse_ip_line(country_code, type, line, source_key)
-
-       def _parse_ip_line(self, country, type, line, source_key):
-               try:
-                       address, prefix, date, status, organization = line.split("|")
-               except ValueError:
-                       organization = None
-
-                       # Try parsing the line without organization
-                       try:
-                               address, prefix, date, status = line.split("|")
-                       except ValueError:
-                               log.warning("Unhandled line format: %s" % line)
-                               return
-
-               # Skip anything that isn't properly assigned
-               if not status in ("assigned", "allocated"):
-                       return
-
-               # Cast prefix into an integer
-               try:
-                       prefix = int(prefix)
-               except:
-                       log.warning("Invalid prefix: %s" % prefix)
-                       return
-
-               # Fix prefix length for IPv4
-               if type == "ipv4":
-                       prefix = 32 - int(math.log(prefix, 2))
-
-               # Try to parse the address
-               try:
-                       network = ipaddress.ip_network("%s/%s" % (address, prefix), strict=False)
-               except ValueError:
-                       log.warning("Invalid IP address: %s" % address)
-                       return
-
-               if not self._check_parsed_network(network):
-                       return
-
-               self.db.execute("INSERT INTO networks(network, country, original_countries, source) \
-                       VALUES(%s, %s, %s, %s) ON CONFLICT (network) DO \
-                       UPDATE SET country = excluded.country",
-                       "%s" % network, country, [country], source_key,
-               )
-
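In the extended delegation statistics handled by _parse_ip_line(), the value field of an IPv4 record is a count of addresses rather than a prefix length, hence the 32 - log2(count) conversion above. A short worked example using the same arithmetic (standalone, with an illustrative count):

import math

# 1024 addresses correspond to a /22: 32 - log2(1024) = 32 - 10 = 22
count  = 1024
prefix = 32 - int(math.log(count, 2))

print(prefix)   # 22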
-       def _import_as_names_from_arin(self):
-               downloader = location.importer.Downloader()
-
-                       # XXX: Download the AS names file from ARIN. Note that these names appear to be
-                       # quite technical and not intended for human consumption, unlike the description
-                       # fields in organisation handles of other RIRs - however, this is what we have got,
-                       # and in some cases it may still be better than nothing.
-               with downloader.request("https://ftp.arin.net/info/asn.txt", return_blocks=False) as f:
-                       for line in f:
-                               # Convert binary line to string...
-                               line = str(line)
-
-                               # ... valid lines start with a space, followed by the number of the Autonomous System ...
-                               if not line.startswith(" "):
-                                       continue
-
-                               # Split line and check if there is a valid ASN in it...
-                               asn, name = line.split()[0:2]
-
-                               try:
-                                       asn = int(asn)
-                               except ValueError:
-                                       log.debug("Skipping ARIN AS names line not containing an integer for ASN")
-                                       continue
-
-                               if not ((1 <= asn <= 23455) or (23457 <= asn <= 64495) or (131072 <= asn <= 4199999999)):
-                                       log.debug("Skipping ARIN AS names line not containing a valid ASN: %s" % asn)
-                                       continue
-
-                               # Skip any AS name that appears to be a placeholder for a different RIR or entity...
-                               if re.match(r"^(ASN-BLK|)(AFCONC|AFRINIC|APNIC|ASNBLK|DNIC|LACNIC|RIPE|IANA)(?:\d?$|\-)", name):
-                                       continue
-
-                               # Bail out in case the AS name contains anything we do not expect here...
-                               if re.search(r"[^a-zA-Z0-9-_]", name):
-                                       log.debug("Skipping ARIN AS name for %s containing invalid characters: %s" % \
-                                                       (asn, name))
-
-                               # Things look good here, run INSERT statement and skip this one if we already have
-                               # a (better?) name for this Autonomous System...
-                               self.db.execute("""
-                                       INSERT INTO autnums(
-                                               number,
-                                               name,
-                                               source
-                                       ) VALUES (%s, %s, %s)
-                                       ON CONFLICT (number) DO NOTHING""",
-                                       asn,
-                                       name,
-                                       "ARIN",
-                               )
-
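The range check above keeps only ASNs that can be publicly assigned; the surrounding numbers are reserved, private-use or documentation ASNs. The same bounds, written as a small standalone helper:

def is_public_asn(asn):
    # Same bounds as the check above: public 16-bit ASNs (excluding AS23456,
    # the AS_TRANS placeholder) plus the publicly assignable 32-bit range
    return (1 <= asn <= 23455) or (23457 <= asn <= 64495) or (131072 <= asn <= 4199999999)

assert is_public_asn(3320)        # an ordinary 16-bit ASN
assert not is_public_asn(23456)   # AS_TRANS
assert not is_public_asn(64512)   # private use (RFC 6996)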
-       def handle_update_announcements(self, ns):
-               server = ns.server[0]
-
-               with self.db.transaction():
-                       if server.startswith("/"):
-                               self._handle_update_announcements_from_bird(server)
-                       else:
-                               self._handle_update_announcements_from_telnet(server)
-
-                       # Purge anything we never want here
-                       self.db.execute("""
-                               -- Delete default routes
-                               DELETE FROM announcements WHERE network = '::/0' OR network = '0.0.0.0/0';
-
-                               -- Delete anything that is not global unicast address space
-                               DELETE FROM announcements WHERE family(network) = 6 AND NOT network <<= '2000::/3';
-
-                               -- DELETE "current network" address space
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '0.0.0.0/8';
-
-                               -- DELETE local loopback address space
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '127.0.0.0/8';
-
-                               -- DELETE RFC 1918 address space
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '10.0.0.0/8';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '172.16.0.0/12';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.168.0.0/16';
-
-                               -- DELETE test, benchmark and documentation address space
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.0.0.0/24';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.0.2.0/24';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '198.18.0.0/15';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '198.51.100.0/24';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '203.0.113.0/24';
-
-                               -- DELETE CGNAT address space (RFC 6598)
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '100.64.0.0/10';
-
-                               -- DELETE link local address space
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '169.254.0.0/16';
-
-                               -- DELETE IPv6 to IPv4 (6to4) address space (RFC 3068)
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.88.99.0/24';
-                               DELETE FROM announcements WHERE family(network) = 6 AND network <<= '2002::/16';
-
-                               -- DELETE multicast and reserved address space
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '224.0.0.0/4';
-                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '240.0.0.0/4';
-
-                               -- Delete networks that are too small to be in the global routing table
-                               DELETE FROM announcements WHERE family(network) = 6 AND masklen(network) > 48;
-                               DELETE FROM announcements WHERE family(network) = 4 AND masklen(network) > 24;
-
-                               -- Delete any non-public or reserved ASNs
-                               DELETE FROM announcements WHERE NOT (
-                                       (autnum >= 1 AND autnum <= 23455)
-                                       OR
-                                       (autnum >= 23457 AND autnum <= 64495)
-                                       OR
-                                       (autnum >= 131072 AND autnum <= 4199999999)
-                               );
-
-                               -- Delete everything that we have not seen for 14 days
-                               DELETE FROM announcements WHERE last_seen_at <= CURRENT_TIMESTAMP - INTERVAL '14 days';
-                       """)
-
-       def _handle_update_announcements_from_bird(self, server):
-               # Pre-compile the regular expression for faster searching
-               route = re.compile(b"^\s(.+?)\s+.+?\[AS(.*?).\]$")
-
-               log.info("Requesting routing table from Bird (%s)" % server)
-
-               # Send command to list all routes
-               for line in self._bird_cmd(server, "show route"):
-                       m = route.match(line)
-                       if not m:
-                               log.debug("Could not parse line: %s" % line.decode())
-                               continue
-
-                       # Fetch the extracted network and ASN
-                       network, autnum = m.groups()
-
-                       # Insert it into the database
-                       self.db.execute("INSERT INTO announcements(network, autnum) \
-                               VALUES(%s, %s) ON CONFLICT (network) DO \
-                               UPDATE SET autnum = excluded.autnum, last_seen_at = CURRENT_TIMESTAMP",
-                               network.decode(), autnum.decode(),
-                       )
-
-       def _handle_update_announcements_from_telnet(self, server):
-               # Pre-compile regular expression for routes
-               route = re.compile(b"^\*[\s\>]i([^\s]+).+?(\d+)\si\r\n", re.MULTILINE|re.DOTALL)
-
-               with telnetlib.Telnet(server) as t:
-                       # Enable debug mode
-                       #if ns.debug:
-                       #       t.set_debuglevel(10)
-
-                       # Wait for console greeting
-                       greeting = t.read_until(b"> ", timeout=30)
-                       if not greeting:
-                               log.error("Could not get a console prompt")
-                               return 1
-
-                       # Disable pagination
-                       t.write(b"terminal length 0\n")
-
-                       # Wait for the prompt to return
-                       t.read_until(b"> ")
-
-                       # Fetch the routing tables
-                       for protocol in ("ipv6", "ipv4"):
-                               log.info("Requesting %s routing table" % protocol)
-
-                               # Request the full unicast routing table
-                               t.write(b"show bgp %s unicast\n" % protocol.encode())
-
-                               # Read entire header which ends with "Path"
-                               t.read_until(b"Path\r\n")
-
-                               while True:
-                                       # Try reading a full entry
-                                       # These might be broken across multiple lines but end with "i"
-                                       line = t.read_until(b"i\r\n", timeout=5)
-                                       if not line:
-                                               break
-
-                                       # Show line for debugging
-                                       #log.debug(repr(line))
-
-                                       # Try finding a route in here
-                                       m = route.match(line)
-                                       if m:
-                                               network, autnum = m.groups()
-
-                                               # Convert network to string
-                                               network = network.decode()
-
-                                               # Append /24 for IPv4 addresses
-                                               if not "/" in network and not ":" in network:
-                                                       network = "%s/24" % network
-
-                                               # Convert AS number to integer
-                                               autnum = int(autnum)
-
-                                               log.info("Found announcement for %s by %s" % (network, autnum))
-
-                                               self.db.execute("INSERT INTO announcements(network, autnum) \
-                                                       VALUES(%s, %s) ON CONFLICT (network) DO \
-                                                       UPDATE SET autnum = excluded.autnum, last_seen_at = CURRENT_TIMESTAMP",
-                                                       network, autnum,
-                                               )
-
-                               log.info("Finished reading the %s routing table" % protocol)
-
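The pre-compiled route pattern above pulls the announced prefix and the originating ASN out of each table entry. A standalone illustration against a single made-up line of output (the pattern is written as a raw bytes literal here; real router output may be formatted slightly differently):

import re

route = re.compile(rb"^\*[\s\>]i([^\s]+).+?(\d+)\si\r\n", re.MULTILINE|re.DOTALL)

line = b"*>i192.0.2.0/24     203.0.113.1    0    100    0    65001 i\r\n"

m = route.match(line)
if m:
    network, autnum = m.groups()
    print(network.decode(), int(autnum))   # 192.0.2.0/24 65001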
-       def _bird_cmd(self, socket_path, command):
-               # Connect to the socket
-               s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-               s.connect(socket_path)
-
-               # Allocate some buffer
-               buffer = b""
-
-               # Send the command
-               s.send(b"%s\n" % command.encode())
-
-               while True:
-                       # Fill up the buffer
-                       buffer += s.recv(4096)
-
-                       while True:
-                               # Search for the next newline
-                               pos = buffer.find(b"\n")
-
-                               # If we cannot find one, we go back and read more data
-                               if pos <= 0:
-                                       break
-
-                               # Cut after the newline character
-                               pos += 1
-
-                               # Split the line we want and keep the rest in buffer
-                               line, buffer = buffer[:pos], buffer[pos:]
-
-                               # Look for the end-of-output indicator
-                               if line == b"0000 \n":
-                                       return
-
-                               # Otherwise return the line
-                               yield line
-
-       def handle_update_overrides(self, ns):
-               with self.db.transaction():
-                       # Drop all data that we have
-                       self.db.execute("""
-                               TRUNCATE TABLE autnum_overrides;
-                               TRUNCATE TABLE network_overrides;
-                       """)
-
-                       # Update overrides for various cloud providers big enough to publish their own IP
-                       # network allocation lists in a machine-readable format...
-                       self._update_overrides_for_aws()
-
-                       for file in ns.files:
-                               log.info("Reading %s..." % file)
-
-                               with open(file, "rb") as f:
-                                       for type, block in location.importer.read_blocks(f):
-                                               if type == "net":
-                                                       network = block.get("net")
-                                                       # Try to parse and normalise the network
-                                                       try:
-                                                               network = ipaddress.ip_network(network, strict=False)
-                                                       except ValueError as e:
-                                                               log.warning("Invalid IP network: %s: %s" % (network, e))
-                                                               continue
-
-                                                       # Refuse a default route override which would overwrite all networks
-                                                       if network.prefixlen == 0:
-                                                               log.warning("Skipping %s: You cannot overwrite default" % network)
-                                                               continue
-
-                                                       self.db.execute("""
-                                                               INSERT INTO network_overrides(
-                                                                       network,
-                                                                       country,
-                                                                       source,
-                                                                       is_anonymous_proxy,
-                                                                       is_satellite_provider,
-                                                                       is_anycast,
-                                                                       is_drop
-                                                               ) VALUES (%s, %s, %s, %s, %s, %s, %s)
-                                                               ON CONFLICT (network) DO NOTHING""",
-                                                               "%s" % network,
-                                                               block.get("country"),
-                                                               "manual",
-                                                               self._parse_bool(block, "is-anonymous-proxy"),
-                                                               self._parse_bool(block, "is-satellite-provider"),
-                                                               self._parse_bool(block, "is-anycast"),
-                                                               self._parse_bool(block, "drop"),
-                                                       )
-
-                                               elif type == "aut-num":
-                                                       autnum = block.get("aut-num")
-
-                                                       # Check if AS number begins with "AS"
-                                                       if not autnum.startswith("AS"):
-                                                               log.warning("Invalid AS number: %s" % autnum)
-                                                               continue
-
-                                                       # Strip "AS"
-                                                       autnum = autnum[2:]
-
-                                                       self.db.execute("""
-                                                               INSERT INTO autnum_overrides(
-                                                                       number,
-                                                                       name,
-                                                                       country,
-                                                                       source,
-                                                                       is_anonymous_proxy,
-                                                                       is_satellite_provider,
-                                                                       is_anycast,
-                                                                       is_drop
-                                                               ) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
-                                                               ON CONFLICT DO NOTHING""",
-                                                               autnum,
-                                                               block.get("name"),
-                                                               block.get("country"),
-                                                               "manual",
-                                                               self._parse_bool(block, "is-anonymous-proxy"),
-                                                               self._parse_bool(block, "is-satellite-provider"),
-                                                               self._parse_bool(block, "is-anycast"),
-                                                               self._parse_bool(block, "drop"),
-                                                       )
-
-                                               else:
-                                                       log.warning("Unsupported type: %s" % type)
-
-       def _update_overrides_for_aws(self):
-               # Download Amazon AWS IP allocation file to create overrides...
-               downloader = location.importer.Downloader()
-
-               try:
-                       with downloader.request("https://ip-ranges.amazonaws.com/ip-ranges.json", return_blocks=False) as f:
-                               aws_ip_dump = json.load(f.body)
-               except Exception as e:
-                       log.error("unable to preprocess Amazon AWS IP ranges: %s" % e)
-                       return
-
-               # XXX: Set up a dictionary mapping a region name to a country. Unfortunately, there
-               # seems to be no machine-readable version of this other than
-               # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
-               # (which, worse, appears to be incomplete); https://www.cloudping.cloud/endpoints
-               # was helpful here as well.
-               aws_region_country_map = {
-                               "af-south-1": "ZA",
-                               "ap-east-1": "HK",
-                               "ap-south-1": "IN",
-                               "ap-south-2": "IN",
-                               "ap-northeast-3": "JP",
-                               "ap-northeast-2": "KR",
-                               "ap-southeast-1": "SG",
-                               "ap-southeast-2": "AU",
-                               "ap-southeast-3": "MY",
-                               "ap-southeast-4": "AU",
-                               "ap-northeast-1": "JP",
-                               "ca-central-1": "CA",
-                               "eu-central-1": "DE",
-                               "eu-central-2": "CH",
-                               "eu-west-1": "IE",
-                               "eu-west-2": "GB",
-                               "eu-south-1": "IT",
-                               "eu-south-2": "ES",
-                               "eu-west-3": "FR",
-                               "eu-north-1": "SE",
-                               "me-central-1": "AE",
-                               "me-south-1": "BH",
-                               "sa-east-1": "BR"
-                               }
-
-               # Fetch all valid country codes to check parsed networks against...
-               rows = self.db.query("SELECT * FROM countries ORDER BY country_code")
-               validcountries = []
-
-               for row in rows:
-                       validcountries.append(row.country_code)
-
-               with self.db.transaction():
-                       for snetwork in aws_ip_dump["prefixes"] + aws_ip_dump["ipv6_prefixes"]:
-                               try:
-                                       network = ipaddress.ip_network(snetwork.get("ip_prefix") or snetwork.get("ipv6_prefix"), strict=False)
-                               except ValueError:
-                                       log.warning("Unable to parse line: %s" % snetwork)
-                                       continue
-
-                               # Sanitize parsed networks...
-                               if not self._check_parsed_network(network):
-                                       continue
-
-                               # Determine region of this network...
-                               region = snetwork["region"]
-                               cc = None
-                               is_anycast = False
-
-                               # Any region name starting with "us-" will get "US" country code assigned straight away...
-                               if region.startswith("us-"):
-                                       cc = "US"
-                               elif region.startswith("cn-"):
-                                       # ... same goes for China ...
-                                       cc = "CN"
-                               elif region == "GLOBAL":
-                                       # ... funny region name for anycast-like networks ...
-                                       is_anycast = True
-                               elif region in aws_region_country_map:
-                                       # ... assign looked up country code otherwise ...
-                                       cc = aws_region_country_map[region]
-                               else:
-                                       # ... and bail out if we are missing something here
-                                       log.warning("Unable to determine country code for line: %s" % snetwork)
-                                       continue
-
-                               # Skip networks with unknown country codes
-                               if not is_anycast and validcountries and cc not in validcountries:
-                                       log.warning("Skipping Amazon AWS network with bogus country '%s': %s" % \
-                                               (cc, network))
-                                       return
-
-                               # Run INSERT statement...
-                               self.db.execute("""
-                                       INSERT INTO network_overrides(
-                                               network,
-                                               country,
-                                               source,
-                                               is_anonymous_proxy,
-                                               is_satellite_provider,
-                                               is_anycast
-                                       ) VALUES (%s, %s, %s, %s, %s, %s)
-                                       ON CONFLICT (network) DO NOTHING""",
-                                       "%s" % network,
-                                       cc,
-                                       "Amazon AWS IP feed",
-                                       None,
-                                       None,
-                                       is_anycast,
-                               )
-
-
-       @staticmethod
-       def _parse_bool(block, key):
-               val = block.get(key)
-
-               # There is no point in proceeding when we got None
-               if val is None:
-                       return
-
-               # Convert to lowercase
-               val = val.lower()
-
-               # True
-               if val in ("yes", "1"):
-                       return True
-
-               # False
-               if val in ("no", "0"):
-                       return False
-
-               # Default to None
-               return None
-
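_parse_bool() is tri-state: True and False set an override flag explicitly, while None (a missing or unrecognised value) ends up as NULL in the override tables. A few illustrative calls, using plain dicts in place of parsed blocks:

assert CLI._parse_bool({"is-anycast": "yes"}, "is-anycast") is True
assert CLI._parse_bool({"is-anycast": "0"}, "is-anycast") is False
assert CLI._parse_bool({}, "is-anycast") is None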
-       def handle_import_countries(self, ns):
-               with self.db.transaction():
-                       # Drop all data that we have
-                       self.db.execute("TRUNCATE TABLE countries")
-
-                       for file in ns.file:
-                               for line in file:
-                                       line = line.rstrip()
-
-                                       # Ignore any comments
-                                       if line.startswith("#"):
-                                               continue
-
-                                       try:
-                                               country_code, continent_code, name = line.split(maxsplit=2)
-                                       except:
-                                               log.warning("Could not parse line: %s" % line)
-                                               continue
-
-                                       self.db.execute("INSERT INTO countries(country_code, name, continent_code) \
-                                               VALUES(%s, %s, %s) ON CONFLICT DO NOTHING", country_code, name, continent_code)
-
-
-def split_line(line):
-       key, colon, val = line.partition(":")
-
-       # Strip any excess space
-       key = key.strip()
-       val = val.strip()
-
-       return key, val
-
-def main():
-       # Run the command line interface
-       c = CLI()
-       c.run()
-
-main()
similarity index 87%
rename from src/python/__init__.py.in
rename to src/python/location/__init__.py
index bd94d3555b51fbf4e0645572cada7bafa859c438..e0ba510e6cefe7b328449e7310b360177c855279 100644 (file)
@@ -1,4 +1,3 @@
-#!/usr/bin/python3
 ###############################################################################
 #                                                                             #
 # libloc - A library to determine the location of someone on the Internet     #
 #                                                                             #
 ###############################################################################
 
-__version__ = "@VERSION@"
-
 # Import everything from the C module
 from _location import *
+from _location import __version__
 
 # Initialise logging
 from . import logger
+
+def open(path=None):
+       """
+               Opens the database at path, or opens the default database.
+       """
+       if not path:
+               path = DATABASE_PATH
+
+       # Open the database
+       return Database(path)
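Together with the DATABASE_PATH constant now exported by the C module, the new open() helper makes the common case a one-liner. A brief usage sketch (the address is only an example and lookups may return None):

import location

# Open the database installed in the default location
db = location.open()

# Look up a single address
network = db.lookup("81.3.27.32")
if network:
    print(network.country_code)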
diff --git a/src/python/location/database.py b/src/python/location/database.py
new file mode 100644 (file)
index 0000000..c31379c
--- /dev/null
@@ -0,0 +1,169 @@
+"""
+       A lightweight wrapper around psycopg3.
+"""
+
+import asyncio
+import logging
+import psycopg
+import psycopg_pool
+import time
+
+# Setup logging
+log = logging.getLogger("location.database")
+
+class Connection(object):
+       def __init__(self, host, database, user=None, password=None):
+               # Stores connections assigned to tasks
+               self.__connections = {}
+
+               # Create a connection pool
+               self.pool = psycopg_pool.ConnectionPool(
+                       "postgresql://%s:%s@%s/%s" % (user, password, host, database),
+
+                       # Callback to configure any new connections
+                       configure=self.__configure,
+
+                       # Set limits for min/max connections in the pool
+                       min_size=1,
+                       max_size=512,
+
+                       # Give clients up to one minute to retrieve a connection
+                       timeout=60,
+
+                       # Close connections after they have been idle for a few seconds
+                       max_idle=5,
+               )
+
+       def __configure(self, conn):
+               """
+                       Configures any newly opened connections
+               """
+               # Enable autocommit
+               conn.autocommit = True
+
+               # Return any rows as dicts
+               conn.row_factory = psycopg.rows.dict_row
+
+       def connection(self, *args, **kwargs):
+               """
+                       Returns a connection from the pool
+               """
+               # Fetch the current task
+               task = asyncio.current_task()
+
+               assert task, "Could not determine task"
+
+               # Try returning the same connection to the same task
+               try:
+                       return self.__connections[task]
+               except KeyError:
+                       pass
+
+               # Fetch a new connection from the pool
+               conn = self.__connections[task] = self.pool.getconn(*args, **kwargs)
+
+               log.debug("Assigning database connection %s to %s" % (conn, task))
+
+               # When the task finishes, release the connection
+               task.add_done_callback(self.__release_connection)
+
+               return conn
+
+       def __release_connection(self, task):
+               # Retrieve the connection
+               try:
+                       conn = self.__connections[task]
+               except KeyError:
+                       return
+
+               log.debug("Releasing database connection %s of %s" % (conn, task))
+
+               # Delete it
+               del self.__connections[task]
+
+               # Return the connection back into the pool
+               self.pool.putconn(conn)
+
+       def _execute(self, cursor, execute, query, parameters):
+               # Store the time we started this query
+               #t = time.monotonic()
+
+               #try:
+               #       log.debug("Running SQL query %s" % (query % parameters))
+               #except Exception:
+               #       pass
+
+               # Execute the query
+               execute(query, parameters)
+
+               # How long did this take?
+               #elapsed = time.monotonic() - t
+
+               # Log the query time
+               #log.debug("  Query time: %.2fms" % (elapsed * 1000))
+
+       def query(self, query, *parameters, **kwparameters):
+               """
+                       Returns a row list for the given query and parameters.
+               """
+               conn = self.connection()
+
+               with conn.cursor() as cursor:
+                       self._execute(cursor, cursor.execute, query, parameters or kwparameters)
+
+                       return [Row(row) for row in cursor]
+
+       def get(self, query, *parameters, **kwparameters):
+               """
+                       Returns the first row returned for the given query.
+               """
+               rows = self.query(query, *parameters, **kwparameters)
+               if not rows:
+                       return None
+               elif len(rows) > 1:
+                       raise Exception("Multiple rows returned for Database.get() query")
+               else:
+                       return rows[0]
+
+       def execute(self, query, *parameters, **kwparameters):
+               """
+                       Executes the given query.
+               """
+               conn = self.connection()
+
+               with conn.cursor() as cursor:
+                       self._execute(cursor, cursor.execute, query, parameters or kwparameters)
+
+       def executemany(self, query, parameters):
+               """
+                       Executes the given query against all the given param sequences.
+               """
+               conn = self.connection()
+
+               with conn.cursor() as cursor:
+                       self._execute(cursor, cursor.executemany, query, parameters)
+
+       def transaction(self):
+               """
+                       Creates a new transaction on the current task's connection
+               """
+               conn = self.connection()
+
+               return conn.transaction()
+
+       def pipeline(self):
+               """
+                       Sets the connection into pipeline mode.
+               """
+               conn = self.connection()
+
+               return conn.pipeline()
+
+
+class Row(dict):
+       """A dict that allows for object-like property access syntax."""
+       def __getattr__(self, name):
+               try:
+                       return self[name]
+               except KeyError:
+                       raise AttributeError(name)
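Because connections are assigned per asyncio task, the wrapper is meant to be used from inside a running task. A minimal usage sketch, assuming the module is importable as location.database; host, database name and credentials are placeholders:

import asyncio

from location.database import Connection

async def main():
    db = Connection("localhost", "location", user="location", password="secret")

    # Rows behave like objects thanks to the Row class
    for row in db.query("SELECT country_code, name FROM countries ORDER BY country_code"):
        print(row.country_code, row.name)

    # Transactions piggy-back on the task's pooled connection
    with db.transaction():
        db.execute("DELETE FROM countries WHERE country_code = %s", "XX")

asyncio.run(main())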
similarity index 81%
rename from src/python/downloader.py
rename to src/python/location/downloader.py
index 05f7872d1ff59b409c5c961e7362882c7d911888..3dffbc7743a0cd2c609eb2764a4754ee04ac6956 100644 (file)
@@ -1,4 +1,3 @@
-#!/usr/bin/python3
 ###############################################################################
 #                                                                             #
 # libloc - A library to determine the location of someone on the Internet     #
@@ -17,6 +16,7 @@
 #                                                                             #
 ###############################################################################
 
+import gzip
 import logging
 import lzma
 import os
@@ -28,8 +28,7 @@ import urllib.error
 import urllib.parse
 import urllib.request
 
-from . import __version__
-from _location import Database, DATABASE_VERSION_LATEST
+from _location import Database, DATABASE_VERSION_LATEST, __version__
 
 DATABASE_FILENAME = "location.db.xz"
 MIRRORS = (
@@ -209,3 +208,56 @@ class Downloader(object):
                                return False
 
                return True
+
+       def retrieve(self, url, timeout=None, **kwargs):
+               """
+                       This method fetches the content at the given URL
+                       and returns a file object backed by a temporary file.
+
+                       If the content was compressed, it will be decompressed on the fly.
+               """
+               # Open a temporary file to buffer the downloaded content
+               t = tempfile.SpooledTemporaryFile(max_size=100 * 1024 * 1024)
+
+               # Create a new request
+               req = self._make_request(url, **kwargs)
+
+               # Send request
+               res = self._send_request(req, timeout=timeout)
+
+               # Write the payload to the temporary file
+               with res as f:
+                       while True:
+                               buf = f.read(65536)
+                               if not buf:
+                                       break
+
+                               t.write(buf)
+
+               # Rewind the temporary file
+               t.seek(0)
+
+               gzip_compressed = False
+
+               # Fetch the content type
+               content_type = res.headers.get("Content-Type")
+
+               # Decompress any gzipped response on the fly
+               if content_type in ("application/x-gzip", "application/gzip"):
+                       gzip_compressed = True
+
+               # Check for the gzip magic in case web servers send a different MIME type
+               elif t.read(2) == b"\x1f\x8b":
+                       gzip_compressed = True
+
+               # Reset again
+               t.seek(0)
+
+               # Decompress the temporary file
+               if gzip_compressed:
+                       log.debug("Gzip compression detected")
+
+                       t = gzip.GzipFile(fileobj=t, mode="rb")
+
+               # Return the temporary file handle
+               return t
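retrieve() buffers the response in a spooled temporary file and unpacks gzip content whether it is announced in the Content-Type header or only recognisable by the 1f 8b magic bytes. A hedged usage sketch (assuming the module is importable as location.downloader after this rename):

from location.downloader import Downloader

d = Downloader()

# The ARIN AS names file used elsewhere in this commit; served plain or gzipped
with d.retrieve("https://ftp.arin.net/info/asn.txt", timeout=60) as f:
    for line in f:
        print(line.decode(errors="replace").rstrip())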
similarity index 54%
rename from src/python/export.py
rename to src/python/location/export.py
index 3b9e1e0262634a0fded1c43a2b7f0dce6ed8fe12..1d147b1fc998b96b73a3d24431fdb923d94b6fb4 100644 (file)
@@ -1,4 +1,3 @@
-#!/usr/bin/python3
 ###############################################################################
 #                                                                             #
 # libloc - A library to determine the location of someone on the Internet     #
 import io
 import ipaddress
 import logging
+import math
 import os
 import socket
+import sys
 
+from .i18n import _
 import _location
 
 # Initialise logging
@@ -40,23 +42,51 @@ class OutputWriter(object):
        suffix = "networks"
        mode = "w"
 
-       def __init__(self, f, prefix=None):
-               self.f, self.prefix = f, prefix
+       def __init__(self, name, family=None, directory=None, f=None):
+               self.name = name
+               self.family = family
+               self.directory = directory
+
+               # Tag
+               self.tag = self._make_tag()
+
+               # Open output file
+               if f:
+                       self.f = f
+               elif self.directory:
+                       self.f = open(self.filename, self.mode)
+               elif "b" in self.mode:
+                       self.f = io.BytesIO()
+               else:
+                       self.f = io.StringIO()
+
+               # Call any custom initialization
+               self.init()
 
                # Immediately write the header
                self._write_header()
 
-       @classmethod
-       def open(cls, filename, **kwargs):
+       def init(self):
                """
-                       Convenience function to open a file
+                       To be overridden by anything that inherits from this
                """
-               f = open(filename, cls.mode)
-
-               return cls(f, **kwargs)
+               pass
 
        def __repr__(self):
-               return "<%s f=%s>" % (self.__class__.__name__, self.f)
+               return "<%s %s f=%s>" % (self.__class__.__name__, self.tag, self.f)
+
+       def _make_tag(self):
+               families = {
+                       socket.AF_INET6 : "6",
+                       socket.AF_INET  : "4",
+               }
+
+               return "%sv%s" % (self.name, families.get(self.family, "?"))
+
+       @property
+       def filename(self):
+               if self.directory:
+                       return os.path.join(self.directory, "%s.%s" % (self.tag, self.suffix))
 
        def _write_header(self):
                """
@@ -79,8 +109,22 @@ class OutputWriter(object):
                """
                self._write_footer()
 
-               # Close the file
-               self.f.close()
+               # Flush all output
+               self.f.flush()
+
+       def print(self):
+               """
+                       Prints the entire output line by line
+               """
+               if isinstance(self.f, io.BytesIO):
+                       raise TypeError(_("Won't write binary output to stdout"))
+
+               # Go back to the beginning
+               self.f.seek(0)
+
+               # Iterate over everything line by line
+               for line in self.f:
+                       sys.stdout.write(line)
 
 
 class IpsetOutputWriter(OutputWriter):
@@ -89,11 +133,66 @@ class IpsetOutputWriter(OutputWriter):
        """
        suffix = "ipset"
 
+       # This value is used if we don't know any better
+       DEFAULT_HASHSIZE = 64
+
+       # We aim for this many networks in a bucket on average. This allows us to choose
+       # how much memory we want to sacrifice to gain better performance: the lower the
+       # factor, the faster a lookup will be, but the more memory the set will use.
+       # We aim to fill only about three quarters of all buckets, which keeps searches
+       # through the linked lists to a minimum.
+       HASHSIZE_FACTOR = 0.75
+
+       def init(self):
+               # Count all networks
+               self.networks = 0
+
+               # Check that family is being set
+               if not self.family:
+                       raise ValueError("%s requires family being set" % self.__class__.__name__)
+
+       @property
+       def hashsize(self):
+               """
+                       Calculates an optimized hashsize
+               """
+               # Return the default value if we don't know the size of the set
+               if not self.networks:
+                       return self.DEFAULT_HASHSIZE
+
+               # Find the nearest power of two that is larger than the number of networks
+               # divided by the hashsize factor.
+               exponent = math.log(self.networks / self.HASHSIZE_FACTOR, 2)
+
+               # Return the size of the hash (the minimum is 64)
+               return max(2 ** math.ceil(exponent), 64)
+
        def _write_header(self):
-               self.f.write("create %s hash:net family inet hashsize 1024 maxelem 65536\n" % self.prefix)
+               # This must have a fixed size, because we will write the header again at the end
+               self.f.write("create %s hash:net family inet%s" % (
+                       self.tag,
+                       "6" if self.family == socket.AF_INET6 else ""
+               ))
+               self.f.write(" hashsize %8d maxelem 1048576 -exist\n" % self.hashsize)
+               self.f.write("flush %s\n" % self.tag)
 
        def write(self, network):
-               self.f.write("add %s %s\n" % (self.prefix, network))
+               self.f.write("add %s %s\n" % (self.tag, network))
+
+               # Increment network counter
+               self.networks += 1
+
+       def _write_footer(self):
+               # Jump back to the beginning of the file
+               try:
+                       self.f.seek(0)
+
+               # If the output stream isn't seekable, we won't try writing the header again
+               except io.UnsupportedOperation:
+                       return
+
+               # Rewrite the header with better configuration
+               self._write_header()
 
 
 class NftablesOutputWriter(OutputWriter):
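To make the hashsize heuristic of IpsetOutputWriter above concrete, here is the same computation carried out for a set of 1,000 networks (standalone, using the constants from the class):

import math

networks = 1000
HASHSIZE_FACTOR = 0.75

# 1000 / 0.75 = 1333.33..., log2 of that is about 10.4, so round up to 2 ** 11
exponent = math.log(networks / HASHSIZE_FACTOR, 2)
hashsize = max(2 ** math.ceil(exponent), 64)

print(hashsize)   # 2048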
@@ -103,7 +202,7 @@ class NftablesOutputWriter(OutputWriter):
        suffix = "set"
 
        def _write_header(self):
-               self.f.write("define %s = {\n" % self.prefix)
+               self.f.write("define %s = {\n" % self.tag)
 
        def _write_footer(self):
                self.f.write("}\n")
@@ -117,9 +216,16 @@ class XTGeoIPOutputWriter(OutputWriter):
                Formats the output in that way, that it can be loaded by
                the xt_geoip kernel module from xtables-addons.
        """
-       suffix = "iv"
        mode = "wb"
 
+       @property
+       def tag(self):
+               return self.name
+
+       @property
+       def suffix(self):
+               return "iv%s" % ("6" if self.family == socket.AF_INET6 else "4")
+
        def write(self, network):
                self.f.write(network._first_address)
                self.f.write(network._last_address)
@@ -144,19 +250,11 @@ class Exporter(object):
 
                        # Create writers for countries
                        for country_code in countries:
-                               filename = self._make_filename(
-                                       directory, prefix=country_code, suffix=self.writer.suffix, family=family,
-                               )
-
-                               writers[country_code] = self.writer.open(filename, prefix="CC_%s" % country_code)
+                               writers[country_code] = self.writer(country_code, family=family, directory=directory)
 
                        # Create writers for ASNs
                        for asn in asns:
-                               filename = self._make_filename(
-                                       directory, "AS%s" % asn, suffix=self.writer.suffix, family=family,
-                               )
-
-                               writers[asn] = self.writer.open(filename, prefix="AS%s" % asn)
+                               writers[asn] = self.writer("AS%s" % asn, family=family, directory=directory)
 
                        # Filter countries from special country codes
                        country_codes = [
@@ -196,9 +294,7 @@ class Exporter(object):
                        for writer in writers.values():
                                writer.finish()
 
-       def _make_filename(self, directory, prefix, suffix, family):
-               filename = "%s.%s%s" % (
-                       prefix, suffix, "6" if family == socket.AF_INET6 else "4"
-               )
-
-               return os.path.join(directory, filename)
+                       # Print to stdout
+                       if not directory:
+                               for writer in writers.values():
+                                       writer.print()
similarity index 98%
rename from src/python/i18n.py
rename to src/python/location/i18n.py
index 2161aa67c9b52122e6ec114d90ce28f7f219362a..c97c51c5e8aac041ce520b223f25a508114d3df8 100644 (file)
@@ -1,4 +1,3 @@
-#!/usr/bin/python3
 ###############################################################################
 #                                                                             #
 # libloc - A library to determine the location of someone on the Internet     #
similarity index 99%
rename from src/python/logger.py
rename to src/python/location/logger.py
index 0bdf9ec45d0f7ca7b86d7a1e0b44b29f7666f516..62ad8fbc268dffa10e227aa4bd0a68ffb52ad6ff 100644 (file)
@@ -1,4 +1,3 @@
-#!/usr/bin/python3
 ###############################################################################
 #                                                                             #
 # libloc - A library to determine the location of someone on the Internet     #
index 5dd4ec645ccf3854950906980cd7c46db3f66946..45c7bd883c0188948bd87693107309a4dd8040ee 100644 (file)
@@ -17,8 +17,8 @@
 #include <Python.h>
 #include <syslog.h>
 
-#include <loc/format.h>
-#include <loc/resolv.h>
+#include <libloc/format.h>
+#include <libloc/resolv.h>
 
 #include "locationmodule.h"
 #include "as.h"
@@ -117,6 +117,14 @@ PyMODINIT_FUNC PyInit__location(void) {
        if (!m)
                return NULL;
 
+       // Version
+       if (PyModule_AddStringConstant(m, "__version__", PACKAGE_VERSION))
+               return NULL;
+
+       // Default Database Path
+       if (PyModule_AddStringConstant(m, "DATABASE_PATH", LIBLOC_DEFAULT_DATABASE_PATH))
+               return NULL;
+
        // AS
        if (PyType_Ready(&ASType) < 0)
                return NULL;
@@ -143,7 +151,7 @@ PyMODINIT_FUNC PyInit__location(void) {
                return NULL;
 
        Py_INCREF(&DatabaseEnumeratorType);
-       //PyModule_AddObject(m, "DatabaseEnumerator", (PyObject *)&DatabaseEnumeratorType);
+       PyModule_AddObject(m, "DatabaseEnumerator", (PyObject *)&DatabaseEnumeratorType);
 
        // Network
        if (PyType_Ready(&NetworkType) < 0)
index 2f4d652c20c7475b037e934565f3cfe3a7c70cd3..e2679868a88ee92be510834cd2795047b128bffc 100644 (file)
@@ -17,8 +17,7 @@
 #ifndef PYTHON_LOCATION_MODULE_H
 #define PYTHON_LOCATION_MODULE_H
 
-#include <loc/libloc.h>
-#include <loc/as.h>
+#include <libloc/libloc.h>
 
 extern struct loc_ctx* loc_ctx;
 
index 5b1369d6fd91352f680f7ee33fa677816392b19c..4bae918f3f06997fd4bde05eacd636eb489c30a3 100644 (file)
 #include <errno.h>
 #include <limits.h>
 
-#include <loc/libloc.h>
-#include <loc/network.h>
-#include <loc/network-list.h>
+#include <libloc/compat.h>
+#include <libloc/libloc.h>
+#include <libloc/network.h>
+#include <libloc/network-list.h>
 
 #include "locationmodule.h"
 #include "network.h"
@@ -83,21 +84,15 @@ static int Network_init(NetworkObject* self, PyObject* args, PyObject* kwargs) {
 }
 
 static PyObject* Network_repr(NetworkObject* self) {
-       char* network = loc_network_str(self->network);
+       const char* network = loc_network_str(self->network);
 
-       PyObject* obj = PyUnicode_FromFormat("<location.Network %s>", network);
-       free(network);
-
-       return obj;
+       return PyUnicode_FromFormat("<location.Network %s>", network);
 }
 
 static PyObject* Network_str(NetworkObject* self) {
-       char* network = loc_network_str(self->network);
+       const char* network = loc_network_str(self->network);
 
-       PyObject* obj = PyUnicode_FromString(network);
-       free(network);
-
-       return obj;
+       return PyUnicode_FromString(network);
 }
 
 static PyObject* Network_get_country_code(NetworkObject* self) {
@@ -216,12 +211,9 @@ static PyObject* Network_get_family(NetworkObject* self) {
 }
 
 static PyObject* Network_get_first_address(NetworkObject* self) {
-       char* address = loc_network_format_first_address(self->network);
+       const char* address = loc_network_format_first_address(self->network);
 
-       PyObject* obj = PyUnicode_FromString(address);
-       free(address);
-
-       return obj;
+       return PyUnicode_FromString(address);
 }
 
 static PyObject* PyBytes_FromAddress(const struct in6_addr* address6) {
@@ -245,12 +237,9 @@ static PyObject* Network_get__first_address(NetworkObject* self) {
 }
 
 static PyObject* Network_get_last_address(NetworkObject* self) {
-       char* address = loc_network_format_last_address(self->network);
+       const char* address = loc_network_format_last_address(self->network);
 
-       PyObject* obj = PyUnicode_FromString(address);
-       free(address);
-
-       return obj;
+       return PyUnicode_FromString(address);
 }
 
 static PyObject* Network_get__last_address(NetworkObject* self) {
@@ -259,6 +248,63 @@ static PyObject* Network_get__last_address(NetworkObject* self) {
        return PyBytes_FromAddress(address);
 }
 
+static PyObject* Network_richcompare(NetworkObject* self, PyObject* other, int op) {
+       int r;
+
+       // Check for type
+       if (!PyObject_IsInstance(other, (PyObject *)&NetworkType))
+               Py_RETURN_NOTIMPLEMENTED;
+
+       NetworkObject* o = (NetworkObject*)other;
+
+       r = loc_network_cmp(self->network, o->network);
+
+       switch (op) {
+               case Py_EQ:
+                       if (r == 0)
+                               Py_RETURN_TRUE;
+
+                       Py_RETURN_FALSE;
+
+               case Py_LT:
+                       if (r < 0)
+                               Py_RETURN_TRUE;
+
+                       Py_RETURN_FALSE;
+
+               default:
+                       break;
+       }
+
+       Py_RETURN_NOTIMPLEMENTED;
+}
+
+static PyObject* Network_reverse_pointer(NetworkObject* self, PyObject* args, PyObject* kwargs) {
+       char* kwlist[] = { "suffix", NULL };
+       const char* suffix = NULL;
+       char* rp = NULL;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|z", kwlist, &suffix))
+               return NULL;
+
+       rp = loc_network_reverse_pointer(self->network, suffix);
+       if (!rp) {
+               switch (errno) {
+                       case ENOTSUP:
+                               Py_RETURN_NONE;
+
+                       default:
+                               PyErr_SetFromErrno(PyExc_OSError);
+                               return NULL;
+               }
+       }
+
+       PyObject* ret = PyUnicode_FromString(rp);
+       free(rp);
+
+       return ret;
+}
+
 static struct PyMethodDef Network_methods[] = {
        {
                "exclude",
@@ -278,6 +324,12 @@ static struct PyMethodDef Network_methods[] = {
                METH_VARARGS,
                NULL,
        },
+       {
+               "reverse_pointer",
+               (PyCFunction)Network_reverse_pointer,
+               METH_VARARGS|METH_KEYWORDS,
+               NULL,
+       },
        {
                "set_flag",
                (PyCFunction)Network_set_flag,
@@ -353,4 +405,5 @@ PyTypeObject NetworkType = {
        .tp_getset =             Network_getsetters,
        .tp_repr =               (reprfunc)Network_repr,
        .tp_str =                (reprfunc)Network_str,
+       .tp_richcompare =        (richcmpfunc)Network_richcompare,
 };
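The hunks above add rich comparison and a reverse_pointer() method to the Python Network type. A minimal usage sketch, assuming the Network constructor accepts a CIDR string as it does elsewhere in these bindings; the printed values are illustrative only:

    import location

    a = location.Network("10.0.0.0/8")
    b = location.Network("10.1.0.0/16")

    # == and < are now delegated to loc_network_cmp(); any other
    # operator still returns NotImplemented.
    print(a == b)   # False, the networks differ
    print(a < b)    # ordering as defined by loc_network_cmp()

    # reverse_pointer() wraps loc_network_reverse_pointer(); it returns
    # None where no reverse pointer can be built (ENOTSUP) and accepts
    # an optional suffix keyword argument.
    print(b.reverse_pointer())
    print(b.reverse_pointer(suffix="example.org"))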
index 43665ba0f0be45bf75ad4c109d0768a487ca826c..b137e7208595c3ec6187600cb15bc3dbcb8d13cc 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <Python.h>
 
-#include <loc/network.h>
+#include <libloc/network.h>
 
 typedef struct {
        PyObject_HEAD
index a3ceae69fb85af0fa265a20b69629965f491ac82..1c06384a326b5a8f26ed1a560a2c2e1205d3e1dc 100644 (file)
@@ -16,8 +16,8 @@
 
 #include <Python.h>
 
-#include <loc/libloc.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/writer.h>
 
 #include "locationmodule.h"
 #include "as.h"
@@ -67,8 +67,8 @@ static int Writer_init(WriterObject* self, PyObject* args, PyObject* kwargs) {
                        return -1;
 
                // Re-open file descriptor
-               f2 = fdopen(fd, "r");
-               if (!f2) {
+               f1 = fdopen(fd, "r");
+               if (!f1) {
                        PyErr_SetFromErrno(PyExc_IOError);
                        return -1;
                }
@@ -227,7 +227,7 @@ static PyObject* Writer_write(WriterObject* self, PyObject* args) {
 
        FILE* f = fopen(path, "w+");
        if (!f) {
-               PyErr_Format(PyExc_IOError, strerror(errno));
+               PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }
 
@@ -236,7 +236,7 @@ static PyObject* Writer_write(WriterObject* self, PyObject* args) {
 
        // Raise any errors
        if (r) {
-               PyErr_Format(PyExc_IOError, strerror(errno));
+               PyErr_SetFromErrno(PyExc_OSError);
                return NULL;
        }
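With PyErr_SetFromErrno() the writer now raises an OSError carrying the errno (IOError is an alias of OSError on Python 3, so existing except clauses keep working). A hedged sketch of how a caller might react to the richer error; the path is a placeholder:

    import errno
    import location

    writer = location.Writer(None, None)   # no signing keys, mirroring the importer's call

    try:
        writer.write("/read-only/location.db")
    except OSError as e:
        if e.errno == errno.EACCES:
            print("cannot write database: permission denied")
        else:
            raise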
 
index 6fe4c8d1e7e12f2100fd54b07c90d024531fc1ef..10ca26bf95c928c35aeadc34c8c616646128453b 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <Python.h>
 
-#include <loc/writer.h>
+#include <libloc/writer.h>
 
 typedef struct {
        PyObject_HEAD
index e42e9de3e7673fb119236eecd4442af90d2d6add..1c4cd75a1cb069cb3e63fe73ac763caaed6cf341 100644 (file)
@@ -20,9 +20,9 @@
 #include <string.h>
 #include <time.h>
 
-#include <loc/format.h>
-#include <loc/private.h>
-#include <loc/resolv.h>
+#include <libloc/format.h>
+#include <libloc/private.h>
+#include <libloc/resolv.h>
 
 static int parse_timestamp(const unsigned char* txt, time_t* t) {
     struct tm ts;
diff --git a/src/scripts/location-importer.in b/src/scripts/location-importer.in
new file mode 100644 (file)
index 0000000..c65029b
--- /dev/null
@@ -0,0 +1,2579 @@
+#!/usr/bin/python3
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2020-2024 IPFire Development Team <info@ipfire.org>           #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+import argparse
+import asyncio
+import csv
+import functools
+import http.client
+import io
+import ipaddress
+import json
+import logging
+import math
+import re
+import socket
+import sys
+import urllib.error
+
+# Load our location module
+import location
+import location.database
+from location.downloader import Downloader
+from location.i18n import _
+
+# Initialise logging
+log = logging.getLogger("location.importer")
+log.propagate = 1
+
+# Define constants
+VALID_ASN_RANGES = (
+       (1, 23455),
+       (23457, 64495),
+       (131072, 4199999999),
+)
+
+TRANSLATED_COUNTRIES = {
+       # When people say UK, they mean GB
+       "UK" : "GB",
+}
+
+IGNORED_COUNTRIES = set((
+       # Formerly Yugoslavia
+       "YU",
+
+       # Some people use ZZ to say "no country" or to hide the country
+       "ZZ",
+))
+
+# Configure the CSV parser for ARIN
+csv.register_dialect("arin", delimiter=",", quoting=csv.QUOTE_ALL, quotechar="\"")
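VALID_ASN_RANGES is consumed by _check_parsed_asn(), which is referenced further down but lies outside this excerpt. A minimal sketch of a check of that shape; the actual helper in the script may differ:

    def check_parsed_asn(asn):
        # Accept an ASN only if it falls into one of the allowed ranges,
        # i.e. skip reserved, private-use and documentation ASNs.
        for first, last in VALID_ASN_RANGES:
            if first <= asn <= last:
                return True
        return False

    # check_parsed_asn(13335) -> True  (ordinary 16-bit ASN)
    # check_parsed_asn(64512) -> False (private-use range)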
+
+class CLI(object):
+       def parse_cli(self):
+               parser = argparse.ArgumentParser(
+                       description=_("Location Importer Command Line Interface"),
+               )
+               subparsers = parser.add_subparsers()
+
+               # Global configuration flags
+               parser.add_argument("--debug", action="store_true",
+                       help=_("Enable debug output"))
+               parser.add_argument("--quiet", action="store_true",
+                       help=_("Enable quiet mode"))
+
+               # version
+               parser.add_argument("--version", action="version",
+                       version="%(prog)s @VERSION@")
+
+               # Database
+               parser.add_argument("--database-host", required=True,
+                       help=_("Database Hostname"), metavar=_("HOST"))
+               parser.add_argument("--database-name", required=True,
+                       help=_("Database Name"), metavar=_("NAME"))
+               parser.add_argument("--database-username", required=True,
+                       help=_("Database Username"), metavar=_("USERNAME"))
+               parser.add_argument("--database-password", required=True,
+                       help=_("Database Password"), metavar=_("PASSWORD"))
+
+               # Write Database
+               write = subparsers.add_parser("write", help=_("Write database to file"))
+               write.set_defaults(func=self.handle_write)
+               write.add_argument("file", nargs=1, help=_("Database File"))
+               write.add_argument("--signing-key", nargs="?", type=open, help=_("Signing Key"))
+               write.add_argument("--backup-signing-key", nargs="?", type=open, help=_("Backup Signing Key"))
+               write.add_argument("--vendor", nargs="?", help=_("Sets the vendor"))
+               write.add_argument("--description", nargs="?", help=_("Sets a description"))
+               write.add_argument("--license", nargs="?", help=_("Sets the license"))
+               write.add_argument("--version", type=int, help=_("Database Format Version"))
+
+               # Update WHOIS
+               update_whois = subparsers.add_parser("update-whois", help=_("Update WHOIS Information"))
+               update_whois.add_argument("sources", nargs="*",
+                       help=_("Only update these sources"))
+               update_whois.set_defaults(func=self.handle_update_whois)
+
+               # Update announcements
+               update_announcements = subparsers.add_parser("update-announcements",
+                       help=_("Update BGP Announcements"))
+               update_announcements.set_defaults(func=self.handle_update_announcements)
+               update_announcements.add_argument("server", nargs=1,
+                       help=_("Route Server to connect to"), metavar=_("SERVER"))
+
+               # Update geofeeds
+               update_geofeeds = subparsers.add_parser("update-geofeeds",
+                       help=_("Update Geofeeds"))
+               update_geofeeds.set_defaults(func=self.handle_update_geofeeds)
+
+               # Update feeds
+               update_feeds = subparsers.add_parser("update-feeds",
+                       help=_("Update Feeds"))
+               update_feeds.add_argument("feeds", nargs="*",
+                       help=_("Only update these feeds"))
+               update_feeds.set_defaults(func=self.handle_update_feeds)
+
+               # Update overrides
+               update_overrides = subparsers.add_parser("update-overrides",
+                       help=_("Update overrides"),
+               )
+               update_overrides.add_argument(
+                       "files", nargs="+", help=_("Files to import"),
+               )
+               update_overrides.set_defaults(func=self.handle_update_overrides)
+
+               # Import countries
+               import_countries = subparsers.add_parser("import-countries",
+                       help=_("Import countries"),
+               )
+               import_countries.add_argument("file", nargs=1, type=argparse.FileType("r"),
+                       help=_("File to import"))
+               import_countries.set_defaults(func=self.handle_import_countries)
+
+               args = parser.parse_args()
+
+               # Configure logging
+               if args.debug:
+                       location.logger.set_level(logging.DEBUG)
+               elif args.quiet:
+                       location.logger.set_level(logging.WARNING)
+
+               # Print usage if no action was given
+               if "func" not in args:
+                       parser.print_usage()
+                       sys.exit(2)
+
+               return args
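Putting the argument definitions above together, an invocation of the generated script might look like this (host name, credentials and the selected source are placeholders):

    location-importer \
        --database-host localhost --database-name location \
        --database-username location --database-password secret \
        update-whois RIPE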
+
+       async def run(self):
+               # Parse command line arguments
+               args = self.parse_cli()
+
+               # Initialize the downloader
+               self.downloader = Downloader()
+
+               # Initialise database
+               self.db = self._setup_database(args)
+
+               # Call function
+               ret = await args.func(args)
+
+               # Return with exit code
+               if ret:
+                       sys.exit(ret)
+
+               # Otherwise just exit
+               sys.exit(0)
+
+       def _setup_database(self, ns):
+               """
+                       Initialise the database
+               """
+               # Connect to database
+               db = location.database.Connection(
+                       host=ns.database_host, database=ns.database_name,
+                       user=ns.database_username, password=ns.database_password,
+               )
+
+               with db.transaction():
+                       db.execute("""
+                               -- announcements
+                               CREATE TABLE IF NOT EXISTS announcements(network inet, autnum bigint,
+                                       first_seen_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP,
+                                       last_seen_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP);
+                               CREATE UNIQUE INDEX IF NOT EXISTS announcements_networks ON announcements(network);
+                               CREATE INDEX IF NOT EXISTS announcements_search2 ON announcements
+                                       USING SPGIST(network inet_ops);
+
+                               -- autnums
+                               CREATE TABLE IF NOT EXISTS autnums(number bigint, name text NOT NULL);
+                               ALTER TABLE autnums ADD COLUMN IF NOT EXISTS source text;
+                               CREATE UNIQUE INDEX IF NOT EXISTS autnums_number ON autnums(number);
+
+                               -- countries
+                               CREATE TABLE IF NOT EXISTS countries(
+                                       country_code text NOT NULL, name text NOT NULL, continent_code text NOT NULL);
+                               CREATE UNIQUE INDEX IF NOT EXISTS countries_country_code ON countries(country_code);
+
+                               -- networks
+                               CREATE TABLE IF NOT EXISTS networks(network inet, country text);
+                               ALTER TABLE networks ADD COLUMN IF NOT EXISTS original_countries text[];
+                               ALTER TABLE networks ADD COLUMN IF NOT EXISTS source text;
+                               CREATE UNIQUE INDEX IF NOT EXISTS networks_network ON networks(network);
+                               CREATE INDEX IF NOT EXISTS networks_search2 ON networks
+                                       USING SPGIST(network inet_ops);
+
+                               -- geofeeds
+                               CREATE TABLE IF NOT EXISTS geofeeds(
+                                       id serial primary key,
+                                       url text,
+                                       status integer default null,
+                                       updated_at timestamp without time zone default null
+                               );
+                               ALTER TABLE geofeeds ADD COLUMN IF NOT EXISTS error text;
+                               CREATE UNIQUE INDEX IF NOT EXISTS geofeeds_unique
+                                       ON geofeeds(url);
+                               CREATE TABLE IF NOT EXISTS geofeed_networks(
+                                       geofeed_id integer references geofeeds(id) on delete cascade,
+                                       network inet,
+                                       country text,
+                                       region text,
+                                       city text
+                               );
+                               CREATE INDEX IF NOT EXISTS geofeed_networks_geofeed_id
+                                       ON geofeed_networks(geofeed_id);
+                               CREATE TABLE IF NOT EXISTS network_geofeeds(network inet, url text);
+                               ALTER TABLE network_geofeeds ADD COLUMN IF NOT EXISTS source text NOT NULL;
+                               CREATE UNIQUE INDEX IF NOT EXISTS network_geofeeds_unique2
+                                       ON network_geofeeds(network, url);
+                               CREATE INDEX IF NOT EXISTS network_geofeeds_url
+                                       ON network_geofeeds(url);
+
+                               -- feeds
+                               CREATE TABLE IF NOT EXISTS autnum_feeds(
+                                       number bigint NOT NULL,
+                                       source text NOT NULL,
+                                       name text,
+                                       country text,
+                                       is_anonymous_proxy boolean,
+                                       is_satellite_provider boolean,
+                                       is_anycast boolean,
+                                       is_drop boolean
+                               );
+                               CREATE UNIQUE INDEX IF NOT EXISTS autnum_feeds_unique
+                                       ON autnum_feeds(number, source);
+
+                               CREATE TABLE IF NOT EXISTS network_feeds(
+                                       network inet NOT NULL,
+                                       source text NOT NULL,
+                                       country text,
+                                       is_anonymous_proxy boolean,
+                                       is_satellite_provider boolean,
+                                       is_anycast boolean,
+                                       is_drop boolean
+                               );
+                               CREATE UNIQUE INDEX IF NOT EXISTS network_feeds_unique
+                                       ON network_feeds(network, source);
+
+                               -- overrides
+                               CREATE TABLE IF NOT EXISTS autnum_overrides(
+                                       number bigint NOT NULL,
+                                       name text,
+                                       country text,
+                                       is_anonymous_proxy boolean,
+                                       is_satellite_provider boolean,
+                                       is_anycast boolean
+                               );
+                               CREATE UNIQUE INDEX IF NOT EXISTS autnum_overrides_number
+                                       ON autnum_overrides(number);
+                               ALTER TABLE autnum_overrides ADD COLUMN IF NOT EXISTS is_drop boolean;
+                               ALTER TABLE autnum_overrides DROP COLUMN IF EXISTS source;
+
+                               CREATE TABLE IF NOT EXISTS network_overrides(
+                                       network inet NOT NULL,
+                                       country text,
+                                       is_anonymous_proxy boolean,
+                                       is_satellite_provider boolean,
+                                       is_anycast boolean
+                               );
+                               CREATE UNIQUE INDEX IF NOT EXISTS network_overrides_network
+                                       ON network_overrides(network);
+                               ALTER TABLE network_overrides ADD COLUMN IF NOT EXISTS is_drop boolean;
+                               ALTER TABLE network_overrides DROP COLUMN IF EXISTS source;
+
+                               -- Cleanup things we no longer need
+                               DROP TABLE IF EXISTS geofeed_overrides;
+                               DROP INDEX IF EXISTS announcements_family;
+                               DROP INDEX IF EXISTS announcements_search;
+                               DROP INDEX IF EXISTS geofeed_networks_search;
+                               DROP INDEX IF EXISTS networks_family;
+                               DROP INDEX IF EXISTS networks_search;
+                               DROP INDEX IF EXISTS network_feeds_search;
+                               DROP INDEX IF EXISTS network_geofeeds_unique;
+                               DROP INDEX IF EXISTS network_geofeeds_search;
+                               DROP INDEX IF EXISTS network_overrides_search;
+                       """)
+
+               return db
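The schema setup relies entirely on idempotent DDL (CREATE ... IF NOT EXISTS, ALTER TABLE ... ADD COLUMN IF NOT EXISTS, DROP ... IF EXISTS), so the importer can upgrade an existing database in place. A small sketch of why re-running it is harmless, using the db connection created above:

    # Running the same idempotent statement twice leaves the schema
    # unchanged and raises no error.
    ddl = "CREATE TABLE IF NOT EXISTS autnums(number bigint, name text NOT NULL)"

    with db.transaction():
        db.execute(ddl)

    with db.transaction():
        db.execute(ddl)   # no-op the second time around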
+
+       def fetch_countries(self):
+               """
+                       Returns the set of all country codes that are currently in the database
+               """
+               # Fetch all valid country codes to check parsed networks against...
+               countries = self.db.query("SELECT country_code FROM countries ORDER BY country_code")
+
+               return set((country.country_code for country in countries))
+
+       async def handle_write(self, ns):
+               """
+                       Compiles a database in libloc format from what is stored in the SQL database
+               """
+               # Allocate a writer
+               writer = location.Writer(ns.signing_key, ns.backup_signing_key)
+
+               # Set all metadata
+               if ns.vendor:
+                       writer.vendor = ns.vendor
+
+               if ns.description:
+                       writer.description = ns.description
+
+               if ns.license:
+                       writer.license = ns.license
+
+               # Run ANALYZE so that the query planner can hopefully make better decisions
+               self.db.execute("ANALYZE")
+
+               # Add all Autonomous Systems
+               log.info("Writing Autonomous Systems...")
+
+               # Select all ASes with a name
+               rows = self.db.query("""
+                       SELECT
+                               autnums.number AS number,
+                               COALESCE(
+                                       overrides.name,
+                                       autnums.name
+                               ) AS name
+                       FROM
+                               autnums
+                       LEFT JOIN
+                               autnum_overrides overrides ON autnums.number = overrides.number
+                       ORDER BY
+                               autnums.number
+                       """)
+
+               for row in rows:
+                       # Skip AS without names
+                       if not row.name:
+                               continue
+
+                       a = writer.add_as(row.number)
+                       a.name = row.name
+
+               # Add all networks
+               log.info("Writing networks...")
+
+               # Create a new temporary table where we collect
+               # the networks that we are interested in
+               self.db.execute("""
+                       CREATE TEMPORARY TABLE
+                               n
+                       (
+                               network               inet NOT NULL,
+                               autnum                integer,
+                               country               text,
+                               is_anonymous_proxy    boolean,
+                               is_satellite_provider boolean,
+                               is_anycast            boolean,
+                               is_drop               boolean
+                       )
+                       WITH (FILLFACTOR = 50)
+               """)
+
+               # Add all known networks
+               self.db.execute("""
+                       INSERT INTO
+                               n
+                       (
+                               network
+                       )
+
+                       SELECT
+                               network
+                       FROM
+                               announcements
+
+                       UNION
+
+                       SELECT
+                               network
+                       FROM
+                               networks
+
+                       UNION
+
+                       SELECT
+                               network
+                       FROM
+                               network_feeds
+
+                       UNION
+
+                       SELECT
+                               network
+                       FROM
+                               network_overrides
+
+                       UNION
+
+                       SELECT
+                               network
+                       FROM
+                               geofeed_networks
+               """)
+
+               # Create an index to search through networks faster
+               self.db.execute("""
+                       CREATE INDEX
+                               n_search
+                       ON
+                               n
+                       USING
+                               SPGIST(network)
+               """)
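The SP-GiST index over inet is what keeps the containment joins (>>=) below affordable. An illustrative query of the kind this index accelerates, assuming the same db helpers used elsewhere in this script; the address is from the documentation range:

    # Find the most specific collected network covering a single address.
    row = self.db.get("""
        SELECT network FROM n
        WHERE network >>= %s
        ORDER BY masklen(network) DESC
        LIMIT 1
        """, "192.0.2.1",
    )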
+
+               # Analyze n
+               self.db.execute("ANALYZE n")
+
+               # Apply the AS number to all networks
+               self.db.execute("""
+                       -- Join all networks together with their most specific announcements
+                       WITH announcements AS (
+                               SELECT
+                                       n.network,
+                                       announcements.autnum,
+
+                                       -- Sort all matches and number them so
+                                       -- that we can later select the best one
+                                       ROW_NUMBER()
+                                       OVER
+                                       (
+                                               PARTITION BY
+                                                       n.network
+                                               ORDER BY
+                                                       masklen(announcements.network) DESC
+                                       ) AS row
+                               FROM
+                                       n
+                               JOIN
+                                       announcements
+                               ON
+                                       announcements.network >>= n.network
+                       )
+
+                       -- Store the result
+                       UPDATE
+                               n
+                       SET
+                               autnum = announcements.autnum
+                       FROM
+                               announcements
+                       WHERE
+                               announcements.network = n.network
+                       AND
+                               announcements.row = 1
+                       """,
+               )
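The ROW_NUMBER() ... ORDER BY masklen(...) DESC pattern is a longest-prefix match: for every collected network the most specific covering announcement wins. The same selection expressed in plain Python, purely as an illustration (the ASNs are documentation values):

    import ipaddress

    announcements = {
        ipaddress.ip_network("10.0.0.0/8"):  64496,
        ipaddress.ip_network("10.1.0.0/16"): 64497,
    }

    def most_specific_asn(network):
        covering = [(a, asn) for a, asn in announcements.items()
                    if network.subnet_of(a)]
        if not covering:
            return None
        # Equivalent of ORDER BY masklen(...) DESC ... row = 1
        a, asn = max(covering, key=lambda item: item[0].prefixlen)
        return asn

    print(most_specific_asn(ipaddress.ip_network("10.1.2.0/24")))   # -> 64497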
+
+               # Apply country information
+               self.db.execute("""
+                       WITH networks AS (
+                               SELECT
+                                       n.network,
+                                       networks.country,
+
+                                       ROW_NUMBER()
+                                       OVER
+                                       (
+                                               PARTITION BY
+                                                       n.network
+                                               ORDER BY
+                                                       masklen(networks.network) DESC
+                                       ) AS row
+                               FROM
+                                       n
+                               JOIN
+                                       networks
+                               ON
+                                       networks.network >>= n.network
+                       )
+
+                       UPDATE
+                               n
+                       SET
+                               country = networks.country
+                       FROM
+                               networks
+                       WHERE
+                               networks.network = n.network
+                       AND
+                               networks.row = 1
+                       """,
+               )
+
+               # Add all country information from Geofeeds
+               self.db.execute("""
+                       WITH geofeeds AS (
+                               SELECT
+                                       DISTINCT ON (geofeed_networks.network)
+                                       geofeed_networks.network,
+                                       geofeed_networks.country
+                               FROM
+                                       geofeeds
+                               JOIN
+                                       network_geofeeds networks
+                               ON
+                                       geofeeds.url = networks.url
+                               JOIN
+                                       geofeed_networks
+                               ON
+                                       geofeeds.id = geofeed_networks.geofeed_id
+                               AND
+                                       networks.network >>= geofeed_networks.network
+                       ),
+
+                       networks AS (
+                               SELECT
+                                       n.network,
+                                       geofeeds.country,
+
+                                       ROW_NUMBER()
+                                       OVER
+                                       (
+                                               PARTITION BY
+                                                       n.network
+                                               ORDER BY
+                                                       masklen(geofeeds.network) DESC
+                                       ) AS row
+                               FROM
+                                       n
+                               JOIN
+                                       geofeeds
+                               ON
+                                       geofeeds.network >>= n.network
+                       )
+
+                       UPDATE
+                               n
+                       SET
+                               country = networks.country
+                       FROM
+                               networks
+                       WHERE
+                               networks.network = n.network
+                       AND
+                               networks.row = 1
+                       """,
+               )
+
+               # Apply country and flags from feeds
+               self.db.execute("""
+                       WITH networks AS (
+                               SELECT
+                                       n.network,
+                                       network_feeds.country,
+
+                                       -- Flags
+                                       network_feeds.is_anonymous_proxy,
+                                       network_feeds.is_satellite_provider,
+                                       network_feeds.is_anycast,
+                                       network_feeds.is_drop,
+
+                                       ROW_NUMBER()
+                                       OVER
+                                       (
+                                               PARTITION BY
+                                                       n.network
+                                               ORDER BY
+                                                       masklen(network_feeds.network) DESC
+                                       ) AS row
+                               FROM
+                                       n
+                               JOIN
+                                       network_feeds
+                               ON
+                                       network_feeds.network >>= n.network
+                       )
+
+                       UPDATE
+                               n
+                       SET
+                               country =
+                                       COALESCE(networks.country, n.country),
+
+                               is_anonymous_proxy =
+                                       COALESCE(networks.is_anonymous_proxy, n.is_anonymous_proxy),
+
+                               is_satellite_provider =
+                                       COALESCE(networks.is_satellite_provider, n.is_satellite_provider),
+
+                               is_anycast =
+                                       COALESCE(networks.is_anycast, n.is_anycast),
+
+                               is_drop =
+                                       COALESCE(networks.is_drop, n.is_drop)
+                       FROM
+                               networks
+                       WHERE
+                               networks.network = n.network
+                       AND
+                               networks.row = 1
+                       """,
+               )
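Each of these UPDATE statements uses COALESCE(feed_value, current_value): a value coming from a feed (or, later, an override) only wins where it is actually set, otherwise the previously collected value is kept. In plain Python terms:

    def coalesce(*values):
        # The first non-NULL (non-None) value wins, just like SQL COALESCE().
        for value in values:
            if value is not None:
                return value
        return None

    print(coalesce(None, "DE"))   # -> "DE": the feed has no country, keep the current one
    print(coalesce("AT", "DE"))   # -> "AT": the feed value overrides the current one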
+
+               # Apply country and flags from AS feeds
+               self.db.execute("""
+                       WITH networks AS (
+                               SELECT
+                                       n.network,
+                                       autnum_feeds.country,
+
+                                       -- Flags
+                                       autnum_feeds.is_anonymous_proxy,
+                                       autnum_feeds.is_satellite_provider,
+                                       autnum_feeds.is_anycast,
+                                       autnum_feeds.is_drop
+                               FROM
+                                       n
+                               JOIN
+                                       autnum_feeds
+                               ON
+                                       autnum_feeds.number = n.autnum
+                       )
+
+                       UPDATE
+                               n
+                       SET
+                               country =
+                                       COALESCE(networks.country, n.country),
+
+                               is_anonymous_proxy =
+                                       COALESCE(networks.is_anonymous_proxy, n.is_anonymous_proxy),
+
+                               is_satellite_provider =
+                                       COALESCE(networks.is_satellite_provider, n.is_satellite_provider),
+
+                               is_anycast =
+                                       COALESCE(networks.is_anycast, n.is_anycast),
+
+                               is_drop =
+                                       COALESCE(networks.is_drop, n.is_drop)
+                       FROM
+                               networks
+                       WHERE
+                               networks.network = n.network
+               """)
+
+               # Apply network overrides
+               self.db.execute("""
+                       WITH networks AS (
+                               SELECT
+                                       n.network,
+                                       network_overrides.country,
+
+                                       -- Flags
+                                       network_overrides.is_anonymous_proxy,
+                                       network_overrides.is_satellite_provider,
+                                       network_overrides.is_anycast,
+                                       network_overrides.is_drop,
+
+                                       ROW_NUMBER()
+                                       OVER
+                                       (
+                                               PARTITION BY
+                                                       n.network
+                                               ORDER BY
+                                                       masklen(network_overrides.network) DESC
+                                       ) AS row
+                               FROM
+                                       n
+                               JOIN
+                                       network_overrides
+                               ON
+                                       network_overrides.network >>= n.network
+                       )
+
+                       UPDATE
+                               n
+                       SET
+                               country =
+                                       COALESCE(networks.country, n.country),
+
+                               is_anonymous_proxy =
+                                       COALESCE(networks.is_anonymous_proxy, n.is_anonymous_proxy),
+
+                               is_satellite_provider =
+                                       COALESCE(networks.is_satellite_provider, n.is_satellite_provider),
+
+                               is_anycast =
+                                       COALESCE(networks.is_anycast, n.is_anycast),
+
+                               is_drop =
+                                       COALESCE(networks.is_drop, n.is_drop)
+                       FROM
+                               networks
+                       WHERE
+                               networks.network = n.network
+                       AND
+                               networks.row = 1
+               """)
+
+               # Apply AS overrides
+               self.db.execute("""
+                       WITH networks AS (
+                               SELECT
+                                       n.network,
+                                       autnum_overrides.country,
+
+                                       -- Flags
+                                       autnum_overrides.is_anonymous_proxy,
+                                       autnum_overrides.is_satellite_provider,
+                                       autnum_overrides.is_anycast,
+                                       autnum_overrides.is_drop
+                               FROM
+                                       n
+                               JOIN
+                                       autnum_overrides
+                               ON
+                                       autnum_overrides.number = n.autnum
+                       )
+
+                       UPDATE
+                               n
+                       SET
+                               country =
+                                       COALESCE(networks.country, n.country),
+
+                               is_anonymous_proxy =
+                                       COALESCE(networks.is_anonymous_proxy, n.is_anonymous_proxy),
+
+                               is_satellite_provider =
+                                       COALESCE(networks.is_satellite_provider, n.is_satellite_provider),
+
+                               is_anycast =
+                                       COALESCE(networks.is_anycast, n.is_anycast),
+
+                               is_drop =
+                                       COALESCE(networks.is_drop, n.is_drop)
+                       FROM
+                               networks
+                       WHERE
+                               networks.network = n.network
+               """)
+
+               # Here we could remove networks that we no longer need, but since we have
+               # already implemented our deduplication/merge algorithm, this is not
+               # necessary.
+
+               # Export the entire temporary table
+               rows = self.db.query("""
+                       SELECT
+                               *
+                       FROM
+                               n
+                       ORDER BY
+                               network
+               """)
+
+               for row in rows:
+                       network = writer.add_network("%s" % row.network)
+
+                       # Save country
+                       if row.country:
+                               network.country_code = row.country
+
+                       # Save ASN
+                       if row.autnum:
+                               network.asn = row.autnum
+
+                       # Set flags
+                       if row.is_anonymous_proxy:
+                               network.set_flag(location.NETWORK_FLAG_ANONYMOUS_PROXY)
+
+                       if row.is_satellite_provider:
+                               network.set_flag(location.NETWORK_FLAG_SATELLITE_PROVIDER)
+
+                       if row.is_anycast:
+                               network.set_flag(location.NETWORK_FLAG_ANYCAST)
+
+                       if row.is_drop:
+                               network.set_flag(location.NETWORK_FLAG_DROP)
+
+               # Add all countries
+               log.info("Writing countries...")
+
+               # Select all countries
+               rows = self.db.query("""
+                       SELECT
+                               *
+                       FROM
+                               countries
+                       ORDER BY
+                               country_code
+                       """,
+               )
+
+               for row in rows:
+                       c = writer.add_country(row.country_code)
+                       c.continent_code = row.continent_code
+                       c.name = row.name
+
+               # Write everything to file
+               log.info("Writing database to file...")
+               for file in ns.file:
+                       writer.write(file)
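Once written, the file can be opened again with the regular location bindings. A hedged sketch; the output path is a placeholder and the Database/lookup API is assumed from the bindings touched by this commit:

    import location

    db = location.Database("/tmp/test.db")

    network = db.lookup("192.0.2.1")   # documentation address, for illustration
    if network:
        print(network, network.country_code, network.asn)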
+
+       async def handle_update_whois(self, ns):
+               # Did we run successfully?
+               success = True
+
+               sources = (
+                       # African Network Information Centre
+                       ("AFRINIC", (
+                               (self._import_standard_format, "https://ftp.afrinic.net/pub/pub/dbase/afrinic.db.gz"),
+                       )),
+
+                       # Asia Pacific Network Information Centre
+                       ("APNIC", (
+                               (self._import_standard_format, "https://ftp.apnic.net/apnic/whois/apnic.db.inet6num.gz"),
+                               (self._import_standard_format, "https://ftp.apnic.net/apnic/whois/apnic.db.inetnum.gz"),
+                               (self._import_standard_format, "https://ftp.apnic.net/apnic/whois/apnic.db.aut-num.gz"),
+                               (self._import_standard_format, "https://ftp.apnic.net/apnic/whois/apnic.db.organisation.gz"),
+                       )),
+
+                       # American Registry for Internet Numbers
+                       ("ARIN", (
+                               (self._import_extended_format, "https://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest"),
+                               (self._import_arin_as_names,   "https://ftp.arin.net/pub/resource_registry_service/asns.csv"),
+                       )),
+
+                       # Japan Network Information Center
+                       ("JPNIC", (
+                               (self._import_standard_format, "https://ftp.nic.ad.jp/jpirr/jpirr.db.gz"),
+                       )),
+
+                       # Latin America and Caribbean Network Information Centre
+                       ("LACNIC", (
+                               (self._import_standard_format, "https://ftp.lacnic.net/lacnic/dbase/lacnic.db.gz"),
+                               (self._import_extended_format, "https://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-extended-latest"),
+                       )),
+
+                       # Réseaux IP Européens
+                       ("RIPE", (
+                               (self._import_standard_format, "https://ftp.ripe.net/ripe/dbase/split/ripe.db.inet6num.gz"),
+                               (self._import_standard_format, "https://ftp.ripe.net/ripe/dbase/split/ripe.db.inetnum.gz"),
+                               (self._import_standard_format, "https://ftp.ripe.net/ripe/dbase/split/ripe.db.aut-num.gz"),
+                               (self._import_standard_format, "https://ftp.ripe.net/ripe/dbase/split/ripe.db.organisation.gz"),
+                       )),
+               )
+
+               # Fetch all valid country codes to check parsed networks against
+               countries = self.fetch_countries()
+
+               # Check if we have countries
+               if not countries:
+                       log.error("Please import countries before importing any WHOIS data")
+                       return 1
+
+               # Iterate over all potential sources
+               for name, feeds in sources:
+                       # Skip anything that should not be updated
+                       if ns.sources and not name in ns.sources:
+                               continue
+
+                       try:
+                               await self._process_source(name, feeds, countries)
+
+                       # Log an error but continue if an exception occurs
+                       except Exception as e:
+                               log.error("Error processing source %s" % name, exc_info=True)
+                               success = False
+
+               # Return a non-zero exit code for errors
+               return 0 if success else 1
+
+       async def _process_source(self, source, feeds, countries):
+               """
+                       This function processes one source
+               """
+               # Wrap everything into one large transaction
+               with self.db.transaction():
+                       # Remove all previously imported content
+                       self.db.execute("DELETE FROM autnums          WHERE source = %s", source)
+                       self.db.execute("DELETE FROM networks         WHERE source = %s", source)
+                       self.db.execute("DELETE FROM network_geofeeds WHERE source = %s", source)
+
+                       # Create some temporary tables to store parsed data
+                       self.db.execute("""
+                               CREATE TEMPORARY TABLE _autnums(number integer NOT NULL,
+                                       organization text NOT NULL, source text NOT NULL) ON COMMIT DROP;
+                               CREATE UNIQUE INDEX _autnums_number ON _autnums(number);
+
+                               CREATE TEMPORARY TABLE _organizations(handle text NOT NULL,
+                                       name text NOT NULL, source text NOT NULL) ON COMMIT DROP;
+                               CREATE UNIQUE INDEX _organizations_handle ON _organizations(handle);
+
+                               CREATE TEMPORARY TABLE _rirdata(network inet NOT NULL, country text,
+                                       original_countries text[] NOT NULL, source text NOT NULL)
+                                       ON COMMIT DROP;
+                               CREATE INDEX _rirdata_search ON _rirdata
+                                       USING BTREE(family(network), masklen(network));
+                               CREATE UNIQUE INDEX _rirdata_network ON _rirdata(network);
+                       """)
+
+                       # Parse all feeds
+                       for callback, url, *args in feeds:
+                               # Retrieve the feed
+                               f = self.downloader.retrieve(url)
+
+                               # Call the callback
+                               with self.db.pipeline():
+                                       await callback(source, countries, f, *args)
+
+                       # Process all parsed networks from every RIR we have access to and
+                       # insert the largest network chunks into the networks table immediately...
+                       families = self.db.query("""
+                               SELECT DISTINCT
+                                       family(network) AS family
+                               FROM
+                                       _rirdata
+                               ORDER BY
+                                       family(network)
+                               """,
+                       )
+
+                       for family in (row.family for row in families):
+                               # Fetch the smallest mask length in our data set
+                               smallest = self.db.get("""
+                                       SELECT
+                                               MIN(
+                                                       masklen(network)
+                                               ) AS prefix
+                                       FROM
+                                               _rirdata
+                                       WHERE
+                                               family(network) = %s
+                                       """, family,
+                               )
+
+                               # Copy all networks
+                               self.db.execute("""
+                                       INSERT INTO
+                                               networks
+                                       (
+                                               network,
+                                               country,
+                                               original_countries,
+                                               source
+                                       )
+                                       SELECT
+                                               network,
+                                               country,
+                                               original_countries,
+                                               source
+                                       FROM
+                                               _rirdata
+                                       WHERE
+                                               masklen(network) = %s
+                                       AND
+                                               family(network) = %s
+                                       ON CONFLICT DO
+                                               NOTHING""",
+                                       smallest.prefix,
+                                       family,
+                               )
+
+                               # ... determine any other prefixes for this network family, ...
+                               prefixes = self.db.query("""
+                                       SELECT
+                                               DISTINCT masklen(network) AS prefix
+                                       FROM
+                                               _rirdata
+                                       WHERE
+                                               family(network) = %s
+                                       ORDER BY
+                                               masklen(network) ASC
+                                       OFFSET 1
+                                       """, family,
+                               )
+
+                               # ... and insert networks with this prefix in case they provide additional
+                               # information (i.e. a subnet of a larger chunk with a different country)
+                               for prefix in (row.prefix for row in prefixes):
+                                       self.db.execute("""
+                                               WITH candidates AS (
+                                                       SELECT
+                                                               _rirdata.network,
+                                                               _rirdata.country,
+                                                               _rirdata.original_countries,
+                                                               _rirdata.source
+                                                       FROM
+                                                               _rirdata
+                                                       WHERE
+                                                               family(_rirdata.network) = %s
+                                                       AND
+                                                               masklen(_rirdata.network) = %s
+                                               ),
+                                               filtered AS (
+                                                       SELECT
+                                                               DISTINCT ON (c.network)
+                                                               c.network,
+                                                               c.country,
+                                                               c.original_countries,
+                                                               c.source,
+                                                               masklen(networks.network),
+                                                               networks.country AS parent_country
+                                                       FROM
+                                                               candidates c
+                                                       LEFT JOIN
+                                                               networks
+                                                       ON
+                                                               c.network << networks.network
+                                                       ORDER BY
+                                                               c.network,
+                                                               masklen(networks.network) DESC NULLS LAST
+                                               )
+                                               INSERT INTO
+                                                       networks(network, country, original_countries, source)
+                                               SELECT
+                                                       network,
+                                                       country,
+                                                       original_countries,
+                                                       source
+                                               FROM
+                                                       filtered
+                                               WHERE
+                                                       parent_country IS NULL
+                                               OR
+                                                       country <> parent_country
+                                               ON CONFLICT DO NOTHING
+                                               """, family, prefix,
+                                       )
+
+                       self.db.execute("""
+                               INSERT INTO
+                                       autnums
+                               (
+                                       number,
+                                       name,
+                                       source
+                               )
+                               SELECT
+                                       _autnums.number,
+                                       _organizations.name,
+                                       _organizations.source
+                               FROM
+                                       _autnums
+                               JOIN
+                                       _organizations ON _autnums.organization = _organizations.handle
+                               ON CONFLICT
+                               (
+                                       number
+                               )
+                               DO UPDATE
+                                       SET name = excluded.name
+                               """,
+                       )
+
+       async def _import_standard_format(self, source, countries, f, *args):
+               """
+                       Imports a single standard format source feed
+               """
+               # Iterate over all blocks
+               for block in iterate_over_blocks(f):
+                       self._parse_block(block, source, countries)
+
+       async def _import_extended_format(self, source, countries, f, *args):
+               # Iterate over all lines
+               for line in iterate_over_lines(f):
+                       self._parse_line(line, source, countries)
+
+       async def _import_arin_as_names(self, source, countries, f, *args):
+               # Wrap the raw byte stream so that it can be read as text
+               f = io.TextIOWrapper(f)
+
+               # Walk through the file
+               for line in csv.DictReader(f, dialect="arin"):
+                       # Fetch status
+                       status = line.get("Status")
+
+                       # We are only interested in anything managed by ARIN
+                       if not status == "Full Registry Services":
+                               continue
+
+                       # Fetch organization name
+                       name = line.get("Org Name")
+
+                       # Extract ASNs
+                       first_asn = line.get("Start AS Number")
+                       last_asn  = line.get("End AS Number")
+
+                       # Cast to a number
+			try:
+				first_asn = int(first_asn)
+			except (TypeError, ValueError):
+				log.warning("Could not parse ASN '%s'" % first_asn)
+				continue
+
+			try:
+				last_asn = int(last_asn)
+			except (TypeError, ValueError):
+				log.warning("Could not parse ASN '%s'" % last_asn)
+				continue
+
+			# Check if the range is valid
+			if last_asn < first_asn:
+				log.warning("Invalid ASN range %s-%s" % (first_asn, last_asn))
+				continue
+
+                       # Insert everything into the database
+                       for asn in range(first_asn, last_asn + 1):
+                               if not self._check_parsed_asn(asn):
+                                       log.warning("Skipping invalid ASN %s" % asn)
+                                       continue
+
+                               self.db.execute("""
+                                       INSERT INTO
+                                               autnums
+                                       (
+                                               number,
+                                               name,
+                                               source
+                                       )
+                                       VALUES
+                                       (
+                                               %s, %s, %s
+                                       )
+                                       ON CONFLICT
+                                       (
+                                               number
+                                       )
+                                       DO NOTHING
+                                       """, asn, name, "ARIN",
+                               )
+
+       def _check_parsed_network(self, network):
+		"""
+			Assistive function to detect and sort out parsed networks from RIR data
+			(both Whois and so-called "extended sources") which
+
+			(a) are not globally routable (RFC 1918 space, et al.),
+			(b) cover too large a chunk of the IP address space (prefix length
+				is < 7 for IPv4 networks, and < 10 for IPv6), or
+			(c) have "0.0.0.0" or "::" as their network address.
+
+			This unfortunately is necessary due to brain-dead clutter across
+			various RIR databases, causing mismatches and eventually disruptions.
+
+			Returns False in case a network is not suitable for adding to our
+			database, and True otherwise (see the sketch after this method).
+		"""
+               # Check input
+               if isinstance(network, ipaddress.IPv6Network):
+                       pass
+               elif isinstance(network, ipaddress.IPv4Network):
+                       pass
+               else:
+                       raise ValueError("Invalid network: %s (type %s)" % (network, type(network)))
+
+               # Ignore anything that isn't globally routable
+               if not network.is_global:
+                       log.debug("Skipping non-globally routable network: %s" % network)
+                       return False
+
+		# Ignore anything in the unspecified IP range (see RFC 5735 for IPv4 or RFC 2373 for IPv6)
+               elif network.is_unspecified:
+                       log.debug("Skipping unspecified network: %s" % network)
+                       return False
+
+               # IPv6
+               if network.version == 6:
+                       if network.prefixlen < 10:
+                               log.debug("Skipping too big IP chunk: %s" % network)
+                               return False
+
+               # IPv4
+               elif network.version == 4:
+                       if network.prefixlen < 7:
+                               log.debug("Skipping too big IP chunk: %s" % network)
+                               return False
+
+               # In case we have made it here, the network is considered to
+               # be suitable for libloc consumption...
+               return True
+
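+	# A minimal sketch of the checks above, assuming "importer" is an instance
+	# of this class:
+	#
+	#   importer._check_parsed_network(ipaddress.ip_network("10.0.0.0/8"))  # False - not globally routable
+	#   importer._check_parsed_network(ipaddress.ip_network("8.0.0.0/6"))   # False - covers too much space (< /7)
+	#   importer._check_parsed_network(ipaddress.ip_network("1.1.1.0/24"))  # True
+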
+       def _check_parsed_asn(self, asn):
+		"""
+			Assistive function to filter out Autonomous System Numbers that are not
+			suitable for adding to our database. Returns False in such cases, and True otherwise.
+		"""
+
+               for start, end in VALID_ASN_RANGES:
+                       if start <= asn and end >= asn:
+                               return True
+
+               log.info("Supplied ASN %s out of publicly routable ASN ranges" % asn)
+               return False
+
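+	# A minimal sketch, again with "importer" as an instance of this class and
+	# assuming VALID_ASN_RANGES (defined further up) only covers the publicly
+	# routable 16 and 32 bit ranges:
+	#
+	#   importer._check_parsed_asn(15169)  # True  - ordinary public ASN
+	#   importer._check_parsed_asn(23456)  # False - AS_TRANS
+	#   importer._check_parsed_asn(64512)  # False - private 16 bit range
+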
+       def _check_geofeed_url(self, url):
+               """
+                       This function checks if a Geofeed URL is valid.
+
+                       If so, it returns the normalized URL which should be stored instead of
+                       the original one.
+               """
+               # Parse the URL
+               try:
+                       url = urllib.parse.urlparse(url)
+               except ValueError as e:
+                       log.warning("Invalid URL %s: %s" % (url, e))
+                       return
+
+		# Make sure that this is an HTTPS URL
+               if not url.scheme == "https":
+                       log.warning("Skipping Geofeed URL that is not using HTTPS: %s" \
+                               % url.geturl())
+                       return
+
+               # Normalize the URL and convert it back
+               return url.geturl()
+
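+	# A minimal sketch of the URL check above, again with "importer" as an
+	# instance of this class:
+	#
+	#   importer._check_geofeed_url("https://example.com/geofeed.csv")  # returned unchanged
+	#   importer._check_geofeed_url("HTTPS://example.com/geofeed.csv")  # returned with a lower-cased scheme
+	#   importer._check_geofeed_url("http://example.com/geofeed.csv")   # None (not HTTPS)
+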
+       def _parse_block(self, block, source_key, countries):
+               # Get first line to find out what type of block this is
+               line = block[0]
+
+               # aut-num
+               if line.startswith("aut-num:"):
+                       return self._parse_autnum_block(block, source_key)
+
+               # inetnum
+               if line.startswith("inet6num:") or line.startswith("inetnum:"):
+                       return self._parse_inetnum_block(block, source_key, countries)
+
+               # organisation
+               elif line.startswith("organisation:"):
+                       return self._parse_org_block(block, source_key)
+
+       def _parse_autnum_block(self, block, source_key):
+               autnum = {}
+               for line in block:
+                       # Split line
+                       key, val = split_line(line)
+
+                       if key == "aut-num":
+                               m = re.match(r"^(AS|as)(\d+)", val)
+                               if m:
+                                       autnum["asn"] = m.group(2)
+
+                       elif key == "org":
+                               autnum[key] = val.upper()
+
+                       elif key == "descr":
+                               # Save the first description line as well...
+                               if not key in autnum:
+                                       autnum[key] = val
+
+               # Skip empty objects
+               if not autnum or not "asn" in autnum:
+                       return
+
+		# Insert a dummy organisation handle into our temporary organisations
+		# table in case the AS does not have an organisation handle set, but
+		# has a description (a quirk often observed in the APNIC region), so
+		# we can later display at least some string for this AS.
+               if not "org" in autnum:
+                       if "descr" in autnum:
+                               autnum["org"] = "LIBLOC-%s-ORGHANDLE" % autnum.get("asn")
+
+                               self.db.execute("INSERT INTO _organizations(handle, name, source) \
+                                       VALUES(%s, %s, %s) ON CONFLICT (handle) DO NOTHING",
+                                       autnum.get("org"), autnum.get("descr"), source_key,
+                               )
+                       else:
+                               log.warning("ASN %s neither has an organisation handle nor a description line set, omitting" % \
+                                               autnum.get("asn"))
+                               return
+
+               # Insert into database
+               self.db.execute("INSERT INTO _autnums(number, organization, source) \
+                       VALUES(%s, %s, %s) ON CONFLICT (number) DO UPDATE SET \
+                               organization = excluded.organization",
+                       autnum.get("asn"), autnum.get("org"), source_key,
+               )
+
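+	# (Illustrative only: an aut-num block as parsed above might look like
+	#
+	#      aut-num:  AS64496
+	#      org:      ORG-EXAMPLE1
+	#      descr:    Example Network
+	#
+	#  if "org:" is missing but "descr:" is present, a dummy handle such as
+	#  "LIBLOC-64496-ORGHANDLE" is inserted so that the AS still gets a name.)
+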
+       def _parse_inetnum_block(self, block, source_key, countries):
+               inetnum = {}
+               for line in block:
+                       # Split line
+                       key, val = split_line(line)
+
+                       # Filter any inetnum records which are only referring to IP space
+                       # not managed by that specific RIR...
+                       if key == "netname":
+                               if re.match(r"^(ERX-NETBLOCK|(AFRINIC|ARIN|LACNIC|RIPE)-CIDR-BLOCK|IANA-NETBLOCK-\d{1,3}|NON-RIPE-NCC-MANAGED-ADDRESS-BLOCK|STUB-[\d-]{3,}SLASH\d{1,2})", val.strip()):
+                                       log.debug("Skipping record indicating historic/orphaned data: %s" % val.strip())
+                                       return
+
+                       if key == "inetnum":
+                               start_address, delim, end_address = val.partition("-")
+
+                               # Strip any excess space
+                               start_address, end_address = start_address.rstrip(), end_address.strip()
+
+                               # Handle "inetnum" formatting in LACNIC DB (e.g. "24.152.8/22" instead of "24.152.8.0/22")
+                               if start_address and not (delim or end_address):
+                                       try:
+                                               start_address = ipaddress.ip_network(start_address, strict=False)
+                                       except ValueError:
+                                               start_address = start_address.split("/")
+                                               ldigits = start_address[0].count(".")
+
+                                               # How many octets do we need to add?
+                                               # (LACNIC does not seem to have a /8 or greater assigned, so the following should suffice.)
+                                               if ldigits == 1:
+                                                       start_address = start_address[0] + ".0.0/" + start_address[1]
+                                               elif ldigits == 2:
+                                                       start_address = start_address[0] + ".0/" + start_address[1]
+                                               else:
+                                                       log.warning("Could not recover IPv4 address from line in LACNIC DB format: %s" % line)
+                                                       return
+
+                                               try:
+                                                       start_address = ipaddress.ip_network(start_address, strict=False)
+                                               except ValueError:
+                                                       log.warning("Could not parse line in LACNIC DB format: %s" % line)
+                                                       return
+
+                                       # Enumerate first and last IP address of this network
+                                       end_address = start_address[-1]
+                                       start_address = start_address[0]
+
+                               else:
+                                       # Convert to IP address
+                                       try:
+                                               start_address = ipaddress.ip_address(start_address)
+                                               end_address   = ipaddress.ip_address(end_address)
+                                       except ValueError:
+                                               log.warning("Could not parse line: %s" % line)
+                                               return
+
+                               inetnum["inetnum"] = list(ipaddress.summarize_address_range(start_address, end_address))
+
+                       elif key == "inet6num":
+                               inetnum[key] = [ipaddress.ip_network(val, strict=False)]
+
+                       elif key == "country":
+                               cc = val.upper()
+
+                               # Ignore certain country codes
+                               if cc in IGNORED_COUNTRIES:
+                                       log.debug("Ignoring country code '%s'" % cc)
+                                       continue
+
+                               # Translate country codes
+                               try:
+                                       cc = TRANSLATED_COUNTRIES[cc]
+                               except KeyError:
+                                       pass
+
+                               # Do we know this country?
+                               if not cc in countries:
+                                       log.warning("Skipping invalid country code '%s'" % cc)
+                                       continue
+
+                               try:
+                                       inetnum[key].append(cc)
+                               except KeyError:
+                                       inetnum[key] = [cc]
+
+                       # Parse the geofeed attribute
+                       elif key == "geofeed":
+                               inetnum["geofeed"] = val
+
+                       # Parse geofeed when used as a remark
+                       elif key == "remarks":
+                               m = re.match(r"^(?:Geofeed)\s+(https://.*)", val)
+                               if m:
+                                       inetnum["geofeed"] = m.group(1)
+
+               # Skip empty objects
+               if not inetnum:
+                       return
+
+		# Iterate through all networks enumerated above, check them for plausibility,
+		# and insert them into the database if _check_parsed_network() succeeds
+               for single_network in inetnum.get("inet6num") or inetnum.get("inetnum"):
+                       if not self._check_parsed_network(single_network):
+                               continue
+
+			# Fetch the country codes or use a list with an empty country
+			ccs = inetnum.get("country", [None])
+
+			# Insert the network into the database but only use the first country code
+			for cc in ccs:
+				self.db.execute("""
+					INSERT INTO
+						_rirdata
+					(
+						network,
+						country,
+						original_countries,
+						source
+					)
+					VALUES
+					(
+						%s, %s, %s, %s
+					)
+					ON CONFLICT (network)
+						DO UPDATE SET country = excluded.country
+					""", "%s" % single_network, cc, [cc for cc in ccs if cc], source_key,
+				)
+
+				# If there is more than one country, we only use the first one
+				break
+
+                       # Update any geofeed information
+                       geofeed = inetnum.get("geofeed", None)
+                       if geofeed:
+                               self._parse_geofeed(source_key, geofeed, single_network)
+
+       def _parse_geofeed(self, source, url, single_network):
+               # Check the URL
+               url = self._check_geofeed_url(url)
+               if not url:
+                       return
+
+               # Store/update any geofeeds
+               self.db.execute("""
+                       INSERT INTO
+                               network_geofeeds
+                       (
+                               network,
+                               url,
+                               source
+                       )
+                       VALUES
+                       (
+                               %s, %s, %s
+                       )
+                       ON CONFLICT
+                       (
+                               network, url
+                       )
+                       DO UPDATE SET
+                               source = excluded.source
+                       """, "%s" % single_network, url, source,
+               )
+
+       def _parse_org_block(self, block, source_key):
+               org = {}
+               for line in block:
+                       # Split line
+                       key, val = split_line(line)
+
+                       if key == "organisation":
+                               org[key] = val.upper()
+                       elif key == "org-name":
+                               org[key] = val
+
+               # Skip empty objects
+               if not org:
+                       return
+
+               self.db.execute("INSERT INTO _organizations(handle, name, source) \
+                       VALUES(%s, %s, %s) ON CONFLICT (handle) DO \
+                       UPDATE SET name = excluded.name",
+                       org.get("organisation"), org.get("org-name"), source_key,
+               )
+
+       def _parse_line(self, line, source_key, validcountries=None):
+               # Skip version line
+               if line.startswith("2"):
+                       return
+
+               # Skip comments
+               if line.startswith("#"):
+                       return
+
+		try:
+			registry, country_code, type, line = line.split("|", 3)
+		except ValueError:
+			log.warning("Could not parse line: %s" % line)
+			return
+
+               # Skip ASN
+               if type == "asn":
+                       return
+
+               # Skip any unknown protocols
+               elif not type in ("ipv6", "ipv4"):
+                       log.warning("Unknown IP protocol '%s'" % type)
+                       return
+
+               # Skip any lines that are for stats only or do not have a country
+               # code at all (avoids log spam below)
+               if not country_code or country_code == '*':
+                       return
+
+               # Skip objects with unknown country codes
+               if validcountries and country_code not in validcountries:
+                       log.warning("Skipping line with bogus country '%s': %s" % \
+                               (country_code, line))
+                       return
+
+               try:
+                       address, prefix, date, status, organization = line.split("|")
+               except ValueError:
+                       organization = None
+
+                       # Try parsing the line without organization
+                       try:
+                               address, prefix, date, status = line.split("|")
+                       except ValueError:
+                               log.warning("Unhandled line format: %s" % line)
+                               return
+
+               # Skip anything that isn't properly assigned
+               if not status in ("assigned", "allocated"):
+                       return
+
+               # Cast prefix into an integer
+               try:
+                       prefix = int(prefix)
+		except ValueError:
+                       log.warning("Invalid prefix: %s" % prefix)
+                       return
+
+		# For IPv4, the value is an address count - convert it into a prefix length
+               if type == "ipv4":
+                       prefix = 32 - int(math.log(prefix, 2))
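+			# (For example, a count of 256 becomes 32 - log2(256) = /24 and a
+			# count of 1024 becomes /22; IPv6 records already carry a prefix
+			# length and are left untouched.)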
+
+               # Try to parse the address
+               try:
+                       network = ipaddress.ip_network("%s/%s" % (address, prefix), strict=False)
+               except ValueError:
+                       log.warning("Invalid IP address: %s" % address)
+                       return
+
+               if not self._check_parsed_network(network):
+                       return
+
+               self.db.execute("""
+                       INSERT INTO
+                               networks
+                       (
+                               network,
+                               country,
+                               original_countries,
+                               source
+                       )
+                       VALUES
+                       (
+                               %s, %s, %s, %s
+                       )
+                       ON CONFLICT (network)
+                               DO UPDATE SET country = excluded.country
+                       """, "%s" % network, country_code, [country_code], source_key,
+               )
+
+       async def handle_update_announcements(self, ns):
+               server = ns.server[0]
+
+               with self.db.transaction():
+                       if server.startswith("/"):
+                               await self._handle_update_announcements_from_bird(server)
+
+                       # Purge anything we never want here
+                       self.db.execute("""
+                               -- Delete default routes
+                               DELETE FROM announcements WHERE network = '::/0' OR network = '0.0.0.0/0';
+
+                               -- Delete anything that is not global unicast address space
+                               DELETE FROM announcements WHERE family(network) = 6 AND NOT network <<= '2000::/3';
+
+                               -- DELETE "current network" address space
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '0.0.0.0/8';
+
+                               -- DELETE local loopback address space
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '127.0.0.0/8';
+
+                               -- DELETE RFC 1918 address space
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '10.0.0.0/8';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '172.16.0.0/12';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.168.0.0/16';
+
+                               -- DELETE test, benchmark and documentation address space
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.0.0.0/24';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.0.2.0/24';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '198.18.0.0/15';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '198.51.100.0/24';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '203.0.113.0/24';
+
+                               -- DELETE CGNAT address space (RFC 6598)
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '100.64.0.0/10';
+
+                               -- DELETE link local address space
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '169.254.0.0/16';
+
+                               -- DELETE IPv6 to IPv4 (6to4) address space (RFC 3068)
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '192.88.99.0/24';
+                               DELETE FROM announcements WHERE family(network) = 6 AND network <<= '2002::/16';
+
+                               -- DELETE multicast and reserved address space
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '224.0.0.0/4';
+                               DELETE FROM announcements WHERE family(network) = 4 AND network <<= '240.0.0.0/4';
+
+                               -- Delete networks that are too small to be in the global routing table
+                               DELETE FROM announcements WHERE family(network) = 6 AND masklen(network) > 48;
+                               DELETE FROM announcements WHERE family(network) = 4 AND masklen(network) > 24;
+
+                               -- Delete any non-public or reserved ASNs
+                               DELETE FROM announcements WHERE NOT (
+                                       (autnum >= 1 AND autnum <= 23455)
+                                       OR
+                                       (autnum >= 23457 AND autnum <= 64495)
+                                       OR
+                                       (autnum >= 131072 AND autnum <= 4199999999)
+                               );
+
+                               -- Delete everything that we have not seen for 14 days
+                               DELETE FROM announcements WHERE last_seen_at <= CURRENT_TIMESTAMP - INTERVAL '14 days';
+                       """)
+
+       async def _handle_update_announcements_from_bird(self, server):
+               # Pre-compile the regular expression for faster searching
+		route = re.compile(rb"^\s(.+?)\s+.+?\[(?:AS(.*?))?.\]$")
+
+               log.info("Requesting routing table from Bird (%s)" % server)
+
+               aggregated_networks = []
+
+               # Send command to list all routes
+               for line in self._bird_cmd(server, "show route"):
+                       m = route.match(line)
+                       if not m:
+                               # Skip empty lines
+                               if not line:
+                                       pass
+
+                               # Ignore any header lines with the name of the routing table
+                               elif line.startswith(b"Table"):
+                                       pass
+
+                               # Log anything else
+                               else:
+                                       log.debug("Could not parse line: %s" % line.decode())
+
+                               continue
+
+                       # Fetch the extracted network and ASN
+                       network, autnum = m.groups()
+
+                       # Decode into strings
+                       if network:
+                               network = network.decode()
+                       if autnum:
+                               autnum = autnum.decode()
+
+                       # Collect all aggregated networks
+                       if not autnum:
+                               log.debug("%s is an aggregated network" % network)
+                               aggregated_networks.append(network)
+                               continue
+
+                       # Insert it into the database
+                       self.db.execute("INSERT INTO announcements(network, autnum) \
+                               VALUES(%s, %s) ON CONFLICT (network) DO \
+                               UPDATE SET autnum = excluded.autnum, last_seen_at = CURRENT_TIMESTAMP",
+                               network, autnum,
+                       )
+
+               # Process any aggregated networks
+               for network in aggregated_networks:
+                       log.debug("Processing aggregated network %s" % network)
+
+                       # Run "show route all" for each network
+                       for line in self._bird_cmd(server, "show route %s all" % network):
+                               # Try finding the path
+				m = re.match(rb"\s+BGP\.as_path:.* (\d+) {\d+}$", line)
+                               if m:
+                                       # Select the last AS number in the path
+                                       autnum = m.group(1).decode()
+
+                                       # Insert it into the database
+                                       self.db.execute("INSERT INTO announcements(network, autnum) \
+                                               VALUES(%s, %s) ON CONFLICT (network) DO \
+                                               UPDATE SET autnum = excluded.autnum, last_seen_at = CURRENT_TIMESTAMP",
+                                               network, autnum,
+                                       )
+
+                                       # We don't need to process any more
+                                       break
+
+       def _bird_cmd(self, socket_path, command):
+               # Connect to the socket
+               s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+               s.connect(socket_path)
+
+               # Allocate some buffer
+               buffer = b""
+
+               log.debug("Sending Bird command: %s" % command)
+
+               # Send the command
+               s.send(b"%s\n" % command.encode())
+
+               while True:
+                       # Fill up the buffer
+                       buffer += s.recv(4096)
+
+                       while True:
+                               # Search for the next newline
+                               pos = buffer.find(b"\n")
+
+				# If we cannot find one, we go back and read more data
+				if pos < 0:
+					break
+
+                               # Cut after the newline character
+                               pos += 1
+
+                               # Split the line we want and keep the rest in buffer
+                               line, buffer = buffer[:pos], buffer[pos:]
+
+                               # Try parsing any status lines
+                               if len(line) > 4 and line[:4].isdigit() and line[4] in (32, 45):
+                                       code, delim, line = int(line[:4]), line[4], line[5:]
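+					# (Assumed example of the framing: the hello line looks
+					# something like b"0001 BIRD 2.15.1 ready." - code 1
+					# followed by a space - while a line starting with
+					# b"0000 " (code 0) terminates the reply; a "-" in place
+					# of the space marks a continuation line of the same code.)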
+
+                                       log.debug("Received response code %s from bird" % code)
+
+                                       # End of output
+                                       if code == 0:
+                                               return
+
+                                       # Ignore hello line
+                                       elif code == 1:
+                                               continue
+
+                               # Otherwise return the line
+                               yield line
+
+       async def handle_update_geofeeds(self, ns):
+               # Sync geofeeds
+               with self.db.transaction():
+                       # Delete all geofeeds which are no longer linked
+                       self.db.execute("""
+                               DELETE FROM
+                                       geofeeds
+                               WHERE
+                                       geofeeds.url NOT IN (
+                                               SELECT
+                                                       network_geofeeds.url
+                                               FROM
+                                                       network_geofeeds
+                                       )
+                               """,
+                       )
+
+                       # Copy all geofeeds
+                       self.db.execute("""
+                               WITH all_geofeeds AS (
+                                       SELECT
+                                               network_geofeeds.url
+                                       FROM
+                                               network_geofeeds
+                               )
+                               INSERT INTO
+                                       geofeeds
+                               (
+                                       url
+                               )
+                               SELECT
+                                       url
+                               FROM
+                                       all_geofeeds
+                               ON CONFLICT (url)
+                                       DO NOTHING
+                               """,
+                       )
+
+               # Fetch all Geofeeds that require an update
+               geofeeds = self.db.query("""
+                       SELECT
+                               id,
+                               url
+                       FROM
+                               geofeeds
+                       WHERE
+                               updated_at IS NULL
+                       OR
+                               updated_at <= CURRENT_TIMESTAMP - INTERVAL '1 week'
+                       ORDER BY
+                               id
+               """)
+
+               ratelimiter = asyncio.Semaphore(32)
+
+               # Update all geofeeds
+               async with asyncio.TaskGroup() as tasks:
+                       for geofeed in geofeeds:
+                               task = tasks.create_task(
+                                       self._fetch_geofeed(ratelimiter, geofeed),
+                               )
+
+               # Delete data from any feeds that did not update in the last two weeks
+               with self.db.transaction():
+                       self.db.execute("""
+                               DELETE FROM
+                                       geofeed_networks
+                               WHERE
+                                       geofeed_networks.geofeed_id IN (
+                                               SELECT
+                                                       geofeeds.id
+                                               FROM
+                                                       geofeeds
+                                               WHERE
+                                                       updated_at IS NULL
+                                               OR
+                                                       updated_at <= CURRENT_TIMESTAMP - INTERVAL '2 weeks'
+                                       )
+                       """)
+
+       async def _fetch_geofeed(self, ratelimiter, geofeed):
+               async with ratelimiter:
+                       log.debug("Fetching Geofeed %s" % geofeed.url)
+
+                       with self.db.transaction():
+                               # Open the URL
+                               try:
+                                       # Send the request
+                                       f = await asyncio.to_thread(
+                                               self.downloader.retrieve,
+
+                                               # Fetch the feed by its URL
+                                               geofeed.url,
+
+                                               # Send some extra headers
+                                               headers={
+                                                       "User-Agent" : "location/%s" % location.__version__,
+
+                                                       # We expect some plain text file in CSV format
+                                                       "Accept"     : "text/csv, text/plain",
+                                               },
+
+                                               # Don't wait longer than 10 seconds for a response
+                                               timeout=10,
+                                       )
+
+                                       # Remove any previous data
+                                       self.db.execute("DELETE FROM geofeed_networks \
+                                               WHERE geofeed_id = %s", geofeed.id)
+
+                                       lineno = 0
+
+                                       # Read the output line by line
+                                       with self.db.pipeline():
+                                               for line in f:
+                                                       lineno += 1
+
+                                                       try:
+                                                               line = line.decode()
+
+                                                       # Ignore any lines we cannot decode
+                                                       except UnicodeDecodeError:
+                                                               log.debug("Could not decode line %s in %s" \
+                                                                       % (lineno, geofeed.url))
+                                                               continue
+
+                                                       # Strip any newline
+                                                       line = line.rstrip()
+
+                                                       # Skip empty lines
+                                                       if not line:
+                                                               continue
+
+                                                       # Skip comments
+                                                       elif line.startswith("#"):
+                                                               continue
+
+                                                       # Try to parse the line
+                                                       try:
+                                                               fields = line.split(",", 5)
+                                                       except ValueError:
+                                                               log.debug("Could not parse line: %s" % line)
+                                                               continue
+
+                                                       # Check if we have enough fields
+                                                       if len(fields) < 4:
+                                                               log.debug("Not enough fields in line: %s" % line)
+                                                               continue
+
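+							# (Illustrative only: a Geofeed line follows RFC 8805,
+							# "prefix,country,region,city,postal", e.g.
+							# "192.0.2.0/24,US,US-WA,Seattle,"; only the first
+							# four fields are used here.)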
+                                                       # Fetch all fields
+							network, country, region, city = fields[:4]
+
+                                                       # Try to parse the network
+                                                       try:
+                                                               network = ipaddress.ip_network(network, strict=False)
+                                                       except ValueError:
+                                                               log.debug("Could not parse network: %s" % network)
+                                                               continue
+
+                                                       # Strip any excess whitespace from country codes
+                                                       country = country.strip()
+
+                                                       # Make the country code uppercase
+                                                       country = country.upper()
+
+                                                       # Check the country code
+                                                       if not country:
+                                                               log.debug("Empty country code in Geofeed %s line %s" \
+                                                                       % (geofeed.url, lineno))
+                                                               continue
+
+                                                       elif not location.country_code_is_valid(country):
+                                                               log.debug("Invalid country code in Geofeed %s:%s: %s" \
+                                                                       % (geofeed.url, lineno, country))
+                                                               continue
+
+                                                       # Write this into the database
+                                                       self.db.execute("""
+                                                               INSERT INTO
+                                                                       geofeed_networks (
+                                                                               geofeed_id,
+                                                                               network,
+                                                                               country,
+                                                                               region,
+                                                                               city
+                                                                       )
+                                                               VALUES (%s, %s, %s, %s, %s)""",
+                                                               geofeed.id,
+                                                               "%s" % network,
+                                                               country,
+                                                               region,
+                                                               city,
+                                                       )
+
+                               # Catch any HTTP errors
+                               except urllib.request.HTTPError as e:
+                                       self.db.execute("UPDATE geofeeds SET status = %s, error = %s \
+                                               WHERE id = %s", e.code, "%s" % e, geofeed.id)
+
+                                       # Remove any previous data when the feed has been deleted
+                                       if e.code == 404:
+                                               self.db.execute("DELETE FROM geofeed_networks \
+                                                       WHERE geofeed_id = %s", geofeed.id)
+
+                               # Catch any other errors and connection timeouts
+                               except (http.client.InvalidURL, urllib.request.URLError, TimeoutError) as e:
+                                       log.debug("Could not fetch URL %s: %s" % (geofeed.url, e))
+
+                                       self.db.execute("UPDATE geofeeds SET status = %s, error = %s \
+                                               WHERE id = %s", 599, "%s" % e, geofeed.id)
+
+                               # Mark the geofeed as updated
+                               else:
+                                       self.db.execute("""
+                                               UPDATE
+                                                       geofeeds
+                                               SET
+                                                       updated_at = CURRENT_TIMESTAMP,
+                                                       status = NULL,
+                                                       error = NULL
+                                               WHERE
+                                                       id = %s""",
+                                               geofeed.id,
+                                       )
+
+       async def handle_update_overrides(self, ns):
+               with self.db.transaction():
+                       # Drop any previous content
+                       self.db.execute("TRUNCATE TABLE autnum_overrides")
+                       self.db.execute("TRUNCATE TABLE network_overrides")
+
+                       # Remove all Geofeeds
+                       self.db.execute("DELETE FROM network_geofeeds WHERE source = %s", "overrides")
+
+                       for file in ns.files:
+                               log.info("Reading %s..." % file)
+
+                               with open(file, "rb") as f:
+                                       for type, block in read_blocks(f):
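+						# (Assumed shape of an override object, mirroring the
+						# keys read below - blocks of "key: value" lines
+						# separated by blank lines:
+						#
+						#     net:                192.0.2.0/24
+						#     country:            DE
+						#     is-anonymous-proxy: yes
+						#
+						# "aut-num:" and "geofeed:" objects are handled the
+						# same way further down.)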
+                                               if type == "net":
+                                                       network = block.get("net")
+                                                       # Try to parse and normalise the network
+                                                       try:
+                                                               network = ipaddress.ip_network(network, strict=False)
+                                                       except ValueError as e:
+                                                               log.warning("Invalid IP network: %s: %s" % (network, e))
+                                                               continue
+
+							# Make sure that we do not overwrite all networks
+                                                       if network.prefixlen == 0:
+                                                               log.warning("Skipping %s: You cannot overwrite default" % network)
+                                                               continue
+
+                                                       self.db.execute("""
+                                                               INSERT INTO
+                                                                       network_overrides
+                                                               (
+                                                                       network,
+                                                                       country,
+                                                                       is_anonymous_proxy,
+                                                                       is_satellite_provider,
+                                                                       is_anycast,
+                                                                       is_drop
+                                                               )
+                                                               VALUES
+                                                               (
+                                                                       %s, %s, %s, %s, %s, %s
+                                                               )
+                                                               ON CONFLICT (network) DO NOTHING
+                                                               """,
+                                                               "%s" % network,
+                                                               block.get("country"),
+                                                               self._parse_bool(block, "is-anonymous-proxy"),
+                                                               self._parse_bool(block, "is-satellite-provider"),
+                                                               self._parse_bool(block, "is-anycast"),
+                                                               self._parse_bool(block, "drop"),
+                                                       )
+
+                                               elif type == "aut-num":
+                                                       autnum = block.get("aut-num")
+
+                                                       # Check if AS number begins with "AS"
+                                                       if not autnum.startswith("AS"):
+                                                               log.warning("Invalid AS number: %s" % autnum)
+                                                               continue
+
+                                                       # Strip "AS"
+                                                       autnum = autnum[2:]
+
+                                                       self.db.execute("""
+                                                               INSERT INTO
+                                                                       autnum_overrides
+                                                               (
+                                                                       number,
+                                                                       name,
+                                                                       country,
+                                                                       is_anonymous_proxy,
+                                                                       is_satellite_provider,
+                                                                       is_anycast,
+                                                                       is_drop
+                                                               )
+                                                               VALUES
+                                                               (
+                                                                       %s, %s, %s, %s, %s, %s, %s
+                                                               )
+                                                               ON CONFLICT (number) DO NOTHING
+                                                               """,
+                                                               autnum,
+                                                               block.get("name"),
+                                                               block.get("country"),
+                                                               self._parse_bool(block, "is-anonymous-proxy"),
+                                                               self._parse_bool(block, "is-satellite-provider"),
+                                                               self._parse_bool(block, "is-anycast"),
+                                                               self._parse_bool(block, "drop"),
+                                                       )
+
+                                               # Geofeeds
+                                               elif type == "geofeed":
+                                                       networks = []
+
+                                                       # Fetch the URL
+                                                       url = block.get("geofeed")
+
+                                                       # Fetch permitted networks
+                                                       for n in block.get("network", []):
+                                                               try:
+                                                                       n = ipaddress.ip_network(n)
+                                                               except ValueError as e:
+                                                                       log.warning("Ignoring invalid network %s: %s" % (n, e))
+                                                                       continue
+
+                                                               networks.append(n)
+
+							# If no networks have been specified, permit the feed for all networks
+                                                       if not networks:
+                                                               networks = [
+                                                                       ipaddress.ip_network("::/0"),
+                                                                       ipaddress.ip_network("0.0.0.0/0"),
+                                                               ]
+
+                                                       # Check the URL
+                                                       url = self._check_geofeed_url(url)
+                                                       if not url:
+                                                               continue
+
+                                                       # Store the Geofeed URL
+                                                       self.db.execute("""
+                                                               INSERT INTO
+                                                                       geofeeds
+                                                               (
+                                                                       url
+                                                               )
+                                                               VALUES
+                                                               (
+                                                                       %s
+                                                               )
+                                                               ON CONFLICT (url) DO NOTHING
+                                                               """, url,
+                                                       )
+
+                                                       # Store all permitted networks
+                                                       self.db.executemany("""
+                                                               INSERT INTO
+                                                                       network_geofeeds
+                                                               (
+                                                                       network,
+                                                                       url,
+                                                                       source
+                                                               )
+                                                               VALUES
+                                                               (
+                                                                       %s, %s, %s
+                                                               )
+                                                               ON CONFLICT
+                                                               (
+                                                                       network, url
+                                                               )
+                                                               DO UPDATE SET
+                                                                       source = excluded.source
+                                                               """, (("%s" % n, url, "overrides") for n in networks),
+                                                       )
+
+                                               else:
+                                                       log.warning("Unsupported type: %s" % type)
+
+       async def handle_update_feeds(self, ns):
+               """
+                       Update any third-party feeds
+               """
+               success = True
+
+               feeds = (
+                       # AWS IP Ranges
+                       ("AWS-IP-RANGES", self._import_aws_ip_ranges, "https://ip-ranges.amazonaws.com/ip-ranges.json"),
+
+                       # Spamhaus DROP
+                       ("SPAMHAUS-DROP",   self._import_spamhaus_drop, "https://www.spamhaus.org/drop/drop.txt"),
+                       ("SPAMHAUS-DROPV6", self._import_spamhaus_drop, "https://www.spamhaus.org/drop/dropv6.txt"),
+
+                       # Spamhaus ASNDROP
+                       ("SPAMHAUS-ASNDROP", self._import_spamhaus_asndrop, "https://www.spamhaus.org/drop/asndrop.json"),
+               )
+
+               # Drop any data from feeds that we don't support (any more)
+               with self.db.transaction():
+                       # Fetch the names of all feeds we support
+                       sources = [name for name, *rest in feeds]
+
+                       self.db.execute("DELETE FROM autnum_feeds  WHERE NOT source = ANY(%s)", sources)
+                       self.db.execute("DELETE FROM network_feeds WHERE NOT source = ANY(%s)", sources)
+
+               # Walk through all feeds
+               for name, callback, url, *args in feeds:
+                       # Skip any feeds that were not requested on the command line
+                       if ns.feeds and name not in ns.feeds:
+                               continue
+
+                       try:
+                               await self._process_feed(name, callback, url, *args)
+
+                       # Log an error but continue if an exception occurs
+                       except Exception as e:
+                               log.error("Error processing feed '%s': %s" % (name, e))
+                               success = False
+
+               # Return status
+               return 0 if success else 1
+
+       async def _process_feed(self, name, callback, url, *args):
+               """
+                       Processes one feed
+               """
+               # Open the URL
+               f = self.downloader.retrieve(url)
+
+               with self.db.transaction():
+                       # Drop any previous content
+                       self.db.execute("DELETE FROM autnum_feeds  WHERE source = %s", name)
+                       self.db.execute("DELETE FROM network_feeds WHERE source = %s", name)
+
+                       # Call the callback to process the feed
+                       with self.db.pipeline():
+                               return await callback(name, f, *args)
+
+       async def _import_aws_ip_ranges(self, name, f):
+               # Parse the feed
+               feed = json.load(f)
+
+               # Set up a dictionary for mapping a region name to a country. Unfortunately,
+               # there seems to be no machine-readable version available of this other than
+               # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html
+               # (worse, it seems to be incomplete :-/ ); https://www.cloudping.cloud/endpoints
+               # was helpful here as well.
+               aws_region_country_map = {
+                       # Africa
+                       "af-south-1"     : "ZA",
+
+                       # Asia
+                       "il-central-1"   : "IL", # Tel Aviv
+
+                       # Asia/Pacific
+                       "ap-northeast-1" : "JP",
+                       "ap-northeast-2" : "KR",
+                       "ap-northeast-3" : "JP",
+                       "ap-east-1"      : "HK",
+                       "ap-south-1"     : "IN",
+                       "ap-south-2"     : "IN",
+                       "ap-southeast-1" : "SG",
+                       "ap-southeast-2" : "AU",
+                       "ap-southeast-3" : "MY",
+                       "ap-southeast-4" : "AU",
+                       "ap-southeast-5" : "NZ", # Auckland, NZ
+                       "ap-southeast-6" : "AP", # XXX: Precise location not documented anywhere
+
+                       # Canada
+                       "ca-central-1"   : "CA",
+                       "ca-west-1"      : "CA",
+
+                       # Europe
+                       "eu-central-1"   : "DE",
+                       "eu-central-2"   : "CH",
+                       "eu-north-1"     : "SE",
+                       "eu-west-1"      : "IE",
+                       "eu-west-2"      : "GB",
+                       "eu-west-3"      : "FR",
+                       "eu-south-1"     : "IT",
+                       "eu-south-2"     : "ES",
+
+                       # Middle East
+                       "me-central-1"   : "AE",
+                       "me-south-1"     : "BH",
+
+                       # South America
+                       "sa-east-1"      : "BR",
+
+                       # Undocumented, likely located in Berlin rather than Frankfurt
+                       "eusc-de-east-1" : "DE",
+               }
+
+               # Collect a list of all networks
+               prefixes = feed.get("ipv6_prefixes", []) + feed.get("prefixes", [])
+
+               for prefix in prefixes:
+                       # Fetch network
+                       network = prefix.get("ipv6_prefix") or prefix.get("ip_prefix")
+
+                       # Parse the network
+                       try:
+                               network = ipaddress.ip_network(network)
+                       except ValueError as e:
+                               log.warning("%s: Unable to parse prefix %s" % (name, network))
+                               continue
+
+                       # Sanitize parsed networks...
+                       if not self._check_parsed_network(network):
+                               continue
+
+                       # Fetch the region
+                       region = prefix.get("region")
+
+                       # Set some defaults
+                       cc = None
+                       is_anycast = False
+
+                       # Fetch the CC from the dictionary
+                       try:
+                               cc = aws_region_country_map[region]
+
+                       # If we couldn't find anything, let's try something else...
+                       except KeyError as e:
+                               # Find anycast networks
+                               if region == "GLOBAL":
+                                       is_anycast = True
+
+                               # Everything that starts with us- is probably in the United States
+                               elif region.startswith("us-"):
+                                       cc = "US"
+
+                               # Everything that starts with cn- is probably China
+                               elif region.startswith("cn-"):
+                                       cc = "CN"
+
+                               # Log a warning for anything else
+                               else:
+                                       log.warning("%s: Could not determine country code for AWS region %s" \
+                                               % (name, region))
+                                       continue
+
+                       # Write to database
+                       self.db.execute("""
+                               INSERT INTO
+                                       network_feeds
+                               (
+                                       network,
+                                       source,
+                                       country,
+                                       is_anycast
+                               )
+                               VALUES
+                               (
+                                       %s, %s, %s, %s
+                               )
+                               ON CONFLICT (network, source) DO NOTHING
+                               """, "%s" % network, name, cc, is_anycast,
+                       )
+
+       async def _import_spamhaus_drop(self, name, f):
+               """
+                       Import Spamhaus DROP IP feeds
+               """
+               # Count all lines
+               lines = 0
+
+               # Walk through all lines
+               for line in f:
+                       # Decode line
+                       line = line.decode("utf-8")
+
+                       # Strip off any comments
+                       line, _, comment = line.partition(";")
+
+                       # Ignore empty lines
+                       if not line:
+                               continue
+
+                       # Strip any excess whitespace
+                       line = line.strip()
+
+                       # Increment line counter
+                       lines += 1
+
+                       # Parse the network
+                       try:
+                               network = ipaddress.ip_network(line)
+                       except ValueError as e:
+                               log.warning("%s: Could not parse network: %s - %s" % (name, line, e))
+                               continue
+
+                       # Check network
+                       if not self._check_parsed_network(network):
+                               log.warning("%s: Skipping bogus network: %s" % (name, network))
+                               continue
+
+                       # Insert into the database
+                       self.db.execute("""
+                               INSERT INTO
+                                       network_feeds
+                               (
+                                       network,
+                                       source,
+                                       is_drop
+                               )
+                               VALUES
+                               (
+                                       %s, %s, %s
+                               )""", "%s" % network, name, True,
+                       )
+
+               # Raise an exception if we could not import anything
+               if not lines:
+                       raise RuntimeError("Received bogus feed %s with no data" % name)
+
+       async def _import_spamhaus_asndrop(self, name, f):
+               """
+                       Import Spamhaus ASNDROP feed
+               """
+               for line in f:
+                       # Decode the line
+                       line = line.decode("utf-8")
+
+                       # Parse JSON
+                       try:
+                               line = json.loads(line)
+                       except json.JSONDecodeError as e:
+                               log.warning("%s: Unable to parse JSON object %s: %s" % (name, line, e))
+                               continue
+
+                       # Fetch type
+                       type = line.get("type")
+
+                       # Skip any metadata
+                       if type == "metadata":
+                               continue
+
+                       # Fetch ASN
+                       asn  = line.get("asn")
+
+                       # Skip any lines without an ASN
+                       if not asn:
+                               continue
+
+                       # Filter invalid ASNs
+                       if not self._check_parsed_asn(asn):
+                               log.warning("%s: Skipping bogus ASN %s" % (name, asn))
+                               continue
+
+                       # Write to database
+                       self.db.execute("""
+                               INSERT INTO
+                                       autnum_feeds
+                               (
+                                       number,
+                                       source,
+                                       is_drop
+                               )
+                               VALUES
+                               (
+                                       %s, %s, %s
+                               )""", "%s" % asn, name, True,
+                       )
+
+       @staticmethod
+       def _parse_bool(block, key):
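+               """
+                       Parses a boolean value from a block ("yes"/"1" or "no"/"0");
+                       returns None if the key is missing or the value is unrecognised
+               """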
+               val = block.get(key)
+
+               # There is no point in proceeding when we got None
+               if val is None:
+                       return
+
+               # Convert to lowercase
+               val = val.lower()
+
+               # True
+               if val in ("yes", "1"):
+                       return True
+
+               # False
+               if val in ("no", "0"):
+                       return False
+
+               # Default to None
+               return None
+
+       async def handle_import_countries(self, ns):
+               with self.db.transaction():
+                       # Drop all data that we have
+                       self.db.execute("TRUNCATE TABLE countries")
+
+                       for file in ns.file:
+                               for line in file:
+                                       line = line.rstrip()
+
+                                       # Ignore any comments
+                                       if line.startswith("#"):
+                                               continue
+
+                                       try:
+                                               country_code, continent_code, name = line.split(maxsplit=2)
+                                       except ValueError:
+                                               log.warning("Could not parse line: %s" % line)
+                                               continue
+
+                                       self.db.execute("INSERT INTO countries(country_code, name, continent_code) \
+                                               VALUES(%s, %s, %s) ON CONFLICT DO NOTHING", country_code, name, continent_code)
+
+
+def split_line(line):
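+       """
+               Splits a "key: value" line and returns the stripped key and value
+       """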
+       key, colon, val = line.partition(":")
+
+       # Strip any excess space
+       key = key.strip()
+       val = val.strip()
+
+       return key, val
+
+def read_blocks(f):
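+       """
+               Parses blocks read from f and yields (type, data) tuples where the
+               type is taken from the key of the first line of each block
+       """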
+       for block in iterate_over_blocks(f):
+               type = None
+               data = {}
+
+               for i, line in enumerate(block):
+                       key, value = line.split(":", 1)
+
+                       # The key of the first line defines the type
+                       if i == 0:
+                               type = key
+
+                       # Strip any excess whitespace
+                       value = value.strip()
+
+                       # Store some values as a list
+                       if type == "geofeed" and key == "network":
+                               try:
+                                       data[key].append(value)
+                               except KeyError:
+                                       data[key] = [value]
+
+                       # Otherwise store the value as string
+                       else:
+                               data[key] = value
+
+               yield type, data
+
+def iterate_over_blocks(f, charsets=("utf-8", "latin1")):
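+       """
+               Iterates over all lines of f, strips comments, decodes them using the
+               given charsets and yields the remaining lines grouped into blocks that
+               are separated by empty lines
+       """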
+       block = []
+
+       for line in f:
+               # Skip commented lines
+               if line.startswith(b"#") or line.startswith(b"%"):
+                       continue
+
+               # Convert to string
+               for charset in charsets:
+                       try:
+                               line = line.decode(charset)
+                       except UnicodeDecodeError:
+                               continue
+                       else:
+                               break
+
+               # Remove any comments at the end of line
+               line, hash, comment = line.partition("#")
+
+               # Strip any whitespace at the end of the line
+               line = line.rstrip()
+
+               # If we cut off some comment and the line is empty, we can skip it
+               if comment and not line:
+                       continue
+
+               # If the line has some content, keep collecting it
+               if line:
+                       block.append(line)
+                       continue
+
+               # End the block on an empty line
+               if block:
+                       yield block
+
+               # Reset the block
+               block = []
+
+       # Return the last block
+       if block:
+               yield block
+
+def iterate_over_lines(f):
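+       """
+               Decodes each line of f and yields it with any trailing whitespace stripped
+       """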
+       for line in f:
+               # Decode the line
+               line = line.decode()
+
+               # Strip the ending
+               yield line.rstrip()
+
+async def main():
+       # Run the command line interface
+       c = CLI()
+
+       await c.run()
+
+asyncio.run(main())
similarity index 93%
rename from src/python/location.in
rename to src/scripts/location.in
index 0c89d75263370952259255a5470aa4884685b1b1..b34cc912105f1c2ce032bea76427aa9e62e69c7f 100644 (file)
@@ -59,7 +59,7 @@ class CLI(object):
 
                # database
                parser.add_argument("--database", "-d",
-                       default="@databasedir@/database.db", help=_("Path to database"),
+                       default=location.DATABASE_PATH, help=_("Path to database"),
                )
 
                # public key
@@ -154,6 +154,15 @@ class CLI(object):
                        choices=location.export.formats.keys(), default="list")
                list_networks_by_flags.set_defaults(func=self.handle_list_networks_by_flags)
 
+               # List bogons
+               list_bogons = subparsers.add_parser("list-bogons",
+                       help=_("Lists all bogons"),
+               )
+               list_bogons.add_argument("--family", choices=("ipv6", "ipv4"))
+               list_bogons.add_argument("--format",
+                       choices=location.export.formats.keys(), default="list")
+               list_bogons.set_defaults(func=self.handle_list_bogons)
+
                # List countries
                list_countries = subparsers.add_parser("list-countries",
                        help=_("Lists all countries"),
@@ -172,7 +181,7 @@ class CLI(object):
                )
                export.add_argument("--format", help=_("Output format"),
                        choices=location.export.formats.keys(), default="list")
-               export.add_argument("--directory", help=_("Output directory"), required=True)
+               export.add_argument("--directory", help=_("Output directory"))
                export.add_argument("--family",
                        help=_("Specify address family"), choices=("ipv6", "ipv4"),
                )
@@ -229,6 +238,11 @@ class CLI(object):
                        sys.stderr.write("%s\n" % e)
                        ret = 2
 
+               # Catch any other exceptions
+               except Exception as e:
+                       sys.stderr.write("%s\n" % e)
+                       ret = 1
+
                # Return with exit code
                if ret:
                        sys.exit(ret)
@@ -467,7 +481,7 @@ class CLI(object):
                                return 1
 
                # Success
-               log.debug("Database successfully verified")
+               log.info("Database successfully verified")
                return 0
 
        def __get_output_formatter(self, ns):
@@ -500,7 +514,7 @@ class CLI(object):
                writer = self.__get_output_formatter(ns)
 
                for asn in ns.asn:
-                       f = writer(sys.stdout, prefix="AS%s" % asn)
+                       f = writer("AS%s" % asn, family=ns.family, f=sys.stdout)
 
                        # Print all matching networks
                        for n in db.search_networks(asns=[asn], family=ns.family):
@@ -513,7 +527,7 @@ class CLI(object):
 
                for country_code in ns.country_code:
                        # Open standard output
-                       f = writer(sys.stdout, prefix=country_code)
+                       f = writer(country_code, family=ns.family, f=sys.stdout)
 
                        # Print all matching networks
                        for n in db.search_networks(country_codes=[country_code], family=ns.family):
@@ -540,13 +554,22 @@ class CLI(object):
                        raise ValueError(_("You must at least pass one flag"))
 
                writer = self.__get_output_formatter(ns)
-               f = writer(sys.stdout, prefix="custom")
+               f = writer("custom", family=ns.family, f=sys.stdout)
 
                for n in db.search_networks(flags=flags, family=ns.family):
                        f.write(n)
 
                f.finish()
 
+       def handle_list_bogons(self, db, ns):
+               writer = self.__get_output_formatter(ns)
+               f = writer("bogons", family=ns.family, f=sys.stdout)
+
+               for n in db.list_bogons(family=ns.family):
+                       f.write(n)
+
+               f.finish()
+
        def handle_export(self, db, ns):
                countries, asns = [], []
 
@@ -557,7 +580,7 @@ class CLI(object):
                        families = [ socket.AF_INET6, socket.AF_INET ]
 
                for object in ns.objects:
-                       m = re.match("^AS(\d+)$", object)
+                       m = re.match(r"^AS(\d+)$", object)
                        if m:
                                object = int(m.group(1))
 
index ac0c1eb7da963667ae13a4bb8af30d1b2a42cedc..9986a619485f83f10961cde304c21b4a6e6e3412 100644 (file)
 #include <sys/mman.h>
 #include <unistd.h>
 
-#include <loc/libloc.h>
-#include <loc/format.h>
-#include <loc/private.h>
-#include <loc/stringpool.h>
-
-enum loc_stringpool_mode {
-       STRINGPOOL_DEFAULT,
-       STRINGPOOL_MMAP,
-};
+#include <libloc/libloc.h>
+#include <libloc/format.h>
+#include <libloc/private.h>
+#include <libloc/stringpool.h>
+
+#define LOC_STRINGPOOL_BLOCK_SIZE      (512 * 1024)
 
 struct loc_stringpool {
        struct loc_ctx* ctx;
        int refcount;
 
-       enum loc_stringpool_mode mode;
-
-       char* data;
+       // Reference to any mapped data
+       const char* data;
        ssize_t length;
 
-       char* pos;
+       // Reference to own storage
+       char* blocks;
+       size_t size;
 };
 
-static off_t loc_stringpool_get_offset(struct loc_stringpool* pool, const char* pos) {
-       if (pos < pool->data)
-               return -EFAULT;
-
-       if (pos > (pool->data + pool->length))
-               return -EFAULT;
-
-       return pos - pool->data;
-}
-
-static char* __loc_stringpool_get(struct loc_stringpool* pool, off_t offset) {
-       if (offset < 0 || offset >= pool->length)
-               return NULL;
-
-       return pool->data + offset;
-}
-
-static int loc_stringpool_grow(struct loc_stringpool* pool, size_t length) {
-       DEBUG(pool->ctx, "Growing string pool to %zu bytes\n", length);
+static int loc_stringpool_grow(struct loc_stringpool* pool, const size_t size) {
+       DEBUG(pool->ctx, "Growing string pool by %zu byte(s)\n", size);
 
-       // Save pos pointer
-       off_t pos = loc_stringpool_get_offset(pool, pool->pos);
+       // Increment size
+       pool->size += size;
 
-       // Reallocate data section
-       pool->data = realloc(pool->data, length);
-       if (!pool->data)
-               return -ENOMEM;
-
-       pool->length = length;
+       // Reallocate blocks
+       pool->blocks = realloc(pool->blocks, pool->size);
+       if (!pool->blocks) {
+               ERROR(pool->ctx, "Could not grow string pool: %m\n");
+               return 1;
+       }
 
-       // Restore pos
-       pool->pos = __loc_stringpool_get(pool, pos);
+       // Update data pointer
+       pool->data = pool->blocks;
 
        return 0;
 }
 
 static off_t loc_stringpool_append(struct loc_stringpool* pool, const char* string) {
-       if (!string || !*string)
-               return -EINVAL;
+       if (!string) {
+               errno = EINVAL;
+               return -1;
+       }
 
        DEBUG(pool->ctx, "Appending '%s' to string pool at %p\n", string, pool);
 
+       // How much space do we need?
+       const size_t length = strlen(string) + 1;
+
        // Make sure we have enough space
-       int r = loc_stringpool_grow(pool, pool->length + strlen(string) + 1);
-       if (r) {
-               errno = r;
-               return -1;
+       if (pool->length + length > pool->size) {
+               int r = loc_stringpool_grow(pool, LOC_STRINGPOOL_BLOCK_SIZE);
+               if (r)
+                       return r;
        }
 
-       off_t offset = loc_stringpool_get_offset(pool, pool->pos);
+       off_t offset = pool->length;
 
-       // Copy string byte by byte
-       while (*string)
-               *pool->pos++ = *string++;
+       // Copy the string
+       memcpy(pool->blocks + offset, string, length);
 
-       // Terminate the string
-       *pool->pos++ = '\0';
+       // Update the length of the pool
+       pool->length += length;
 
        return offset;
 }
 
-static int __loc_stringpool_new(struct loc_ctx* ctx, struct loc_stringpool** pool, enum loc_stringpool_mode mode) {
+static void loc_stringpool_free(struct loc_stringpool* pool) {
+       DEBUG(pool->ctx, "Releasing string pool %p\n", pool);
+
+       // Free any data
+       if (pool->blocks)
+               free(pool->blocks);
+
+       loc_unref(pool->ctx);
+       free(pool);
+}
+
+int loc_stringpool_new(struct loc_ctx* ctx, struct loc_stringpool** pool) {
        struct loc_stringpool* p = calloc(1, sizeof(*p));
        if (!p)
-               return -ENOMEM;
+               return 1;
 
        p->ctx = loc_ref(ctx);
        p->refcount = 1;
 
-       // Save mode
-       p->mode = mode;
-
        *pool = p;
 
        return 0;
 }
 
-LOC_EXPORT int loc_stringpool_new(struct loc_ctx* ctx, struct loc_stringpool** pool) {
-       int r = __loc_stringpool_new(ctx, pool, STRINGPOOL_DEFAULT);
-       if (r)
-               return r;
-
-       // Add an empty string to new string pools
-       loc_stringpool_append(*pool, "");
-
-       return r;
-}
-
-static int loc_stringpool_mmap(struct loc_stringpool* pool, FILE* f, size_t length, off_t offset) {
-       if (pool->mode != STRINGPOOL_MMAP)
-               return -EINVAL;
+int loc_stringpool_open(struct loc_ctx* ctx, struct loc_stringpool** pool,
+               const char* data, const size_t length) {
+       struct loc_stringpool* p = NULL;
 
-       DEBUG(pool->ctx, "Reading string pool starting from %jd (%zu bytes)\n", (intmax_t)offset, length);
-
-       // Map file content into memory
-       pool->data = pool->pos = mmap(NULL, length, PROT_READ,
-               MAP_PRIVATE, fileno(f), offset);
+       // Allocate a new stringpool
+       int r = loc_stringpool_new(ctx, &p);
+       if (r)
+               goto ERROR;
 
-       // Store size of section
-       pool->length = length;
+       // Store data and length
+       p->data   = data;
+       p->length = length;
 
-       if (pool->data == MAP_FAILED)
-               return -errno;
+       DEBUG(p->ctx, "Opened string pool at %p (%zu bytes)\n", p->data, p->length);
 
+       *pool = p;
        return 0;
-}
 
-LOC_EXPORT int loc_stringpool_open(struct loc_ctx* ctx, struct loc_stringpool** pool,
-               FILE* f, size_t length, off_t offset) {
-       int r = __loc_stringpool_new(ctx, pool, STRINGPOOL_MMAP);
-       if (r)
-               return r;
-
-       // Map data into memory
-       if (length > 0) {
-               r = loc_stringpool_mmap(*pool, f, length, offset);
-               if (r)
-                       return r;
-       }
+ERROR:
+       if (p)
+               loc_stringpool_free(p);
 
-       return 0;
+       return r;
 }
 
-LOC_EXPORT struct loc_stringpool* loc_stringpool_ref(struct loc_stringpool* pool) {
+struct loc_stringpool* loc_stringpool_ref(struct loc_stringpool* pool) {
        pool->refcount++;
 
        return pool;
 }
 
-static void loc_stringpool_free(struct loc_stringpool* pool) {
-       DEBUG(pool->ctx, "Releasing string pool %p\n", pool);
-       int r;
-
-       switch (pool->mode) {
-               case STRINGPOOL_DEFAULT:
-                       if (pool->data)
-                               free(pool->data);
-                       break;
-
-               case STRINGPOOL_MMAP:
-                       if (pool->data) {
-                               r = munmap(pool->data, pool->length);
-                               if (r)
-                                       ERROR(pool->ctx, "Could not unmap data at %p: %s\n",
-                                               pool->data, strerror(errno));
-                       }
-                       break;
-       }
-
-       loc_unref(pool->ctx);
-       free(pool);
-}
-
-LOC_EXPORT struct loc_stringpool* loc_stringpool_unref(struct loc_stringpool* pool) {
+struct loc_stringpool* loc_stringpool_unref(struct loc_stringpool* pool) {
        if (--pool->refcount > 0)
                return NULL;
 
@@ -206,41 +154,49 @@ LOC_EXPORT struct loc_stringpool* loc_stringpool_unref(struct loc_stringpool* po
        return NULL;
 }
 
-static off_t loc_stringpool_get_next_offset(struct loc_stringpool* pool, off_t offset) {
-       const char* string = loc_stringpool_get(pool, offset);
-
-       return offset + strlen(string) + 1;
-}
+const char* loc_stringpool_get(struct loc_stringpool* pool, off_t offset) {
+       // Check boundaries
+       if (offset < 0 || offset >= pool->length) {
+               errno = ERANGE;
+               return NULL;
+       }
 
-LOC_EXPORT const char* loc_stringpool_get(struct loc_stringpool* pool, off_t offset) {
-       return __loc_stringpool_get(pool, offset);
+       // Return any data that we have in memory
+       return pool->data + offset;
 }
 
-LOC_EXPORT size_t loc_stringpool_get_size(struct loc_stringpool* pool) {
-       return loc_stringpool_get_offset(pool, pool->pos);
+size_t loc_stringpool_get_size(struct loc_stringpool* pool) {
+       return pool->length;
 }
 
 static off_t loc_stringpool_find(struct loc_stringpool* pool, const char* s) {
-       if (!s || !*s)
-               return -EINVAL;
+       if (!s || !*s) {
+               errno = EINVAL;
+               return -1;
+       }
 
        off_t offset = 0;
        while (offset < pool->length) {
                const char* string = loc_stringpool_get(pool, offset);
+
+               // Error!
                if (!string)
-                       break;
+                       return -1;
 
-               int r = strcmp(s, string);
-               if (r == 0)
+               // Is this a match?
+               if (strcmp(s, string) == 0)
                        return offset;
 
-               offset = loc_stringpool_get_next_offset(pool, offset);
+               // Shift offset
+               offset += strlen(string) + 1;
        }
 
-       return -ENOENT;
+       // Nothing found
+       errno = ENOENT;
+       return -1;
 }
 
-LOC_EXPORT off_t loc_stringpool_add(struct loc_stringpool* pool, const char* string) {
+off_t loc_stringpool_add(struct loc_stringpool* pool, const char* string) {
        off_t offset = loc_stringpool_find(pool, string);
        if (offset >= 0) {
                DEBUG(pool->ctx, "Found '%s' at position %jd\n", string, (intmax_t)offset);
@@ -250,21 +206,22 @@ LOC_EXPORT off_t loc_stringpool_add(struct loc_stringpool* pool, const char* str
        return loc_stringpool_append(pool, string);
 }
 
-LOC_EXPORT void loc_stringpool_dump(struct loc_stringpool* pool) {
+void loc_stringpool_dump(struct loc_stringpool* pool) {
        off_t offset = 0;
 
        while (offset < pool->length) {
                const char* string = loc_stringpool_get(pool, offset);
                if (!string)
-                       break;
+                       return;
 
                printf("%jd (%zu): %s\n", (intmax_t)offset, strlen(string), string);
 
-               offset = loc_stringpool_get_next_offset(pool, offset);
+               // Shift offset
+               offset += strlen(string) + 1;
        }
 }
 
-LOC_EXPORT size_t loc_stringpool_write(struct loc_stringpool* pool, FILE* f) {
+size_t loc_stringpool_write(struct loc_stringpool* pool, FILE* f) {
        size_t size = loc_stringpool_get_size(pool);
 
        return fwrite(pool->data, sizeof(*pool->data), size, f);
index 1c8e1165b065b84f1b1f9ac3a3b1df8201b00ba9..50ba01f1018958b17de3bd2b136a63b00b6db964 100644 (file)
@@ -1,6 +1,6 @@
 [Unit]
 Description=Automatic Location Database Updater
-Documentation=man:location(8) https://man-pages.ipfire.org/libloc/location.html
+Documentation=man:location(1) https://man-pages.ipfire.org/libloc/location.html
 Requires=network.target
 
 [Service]
diff --git a/src/test-address.c b/src/test-address.c
new file mode 100644 (file)
index 0000000..7012e41
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+       libloc - A library to determine the location of someone on the Internet
+
+       Copyright (C) 2022 IPFire Development Team <info@ipfire.org>
+
+       This program is free software; you can redistribute it and/or modify
+       it under the terms of the GNU General Public License as published by
+       the Free Software Foundation; either version 2 of the License, or
+       (at your option) any later version.
+
+       This program is distributed in the hope that it will be useful,
+       but WITHOUT ANY WARRANTY; without even the implied warranty of
+       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+       GNU General Public License for more details.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+
+#include <libloc/libloc.h>
+#include <libloc/address.h>
+#include <libloc/private.h>
+
+static int perform_tests(struct loc_ctx* ctx, const int family) {
+       struct in6_addr address = IN6ADDR_ANY_INIT;
+       const char* e = NULL;
+       const char* s = NULL;
+
+       // Reset IP address
+       loc_address_reset(&address, family);
+
+       if (!loc_address_all_zeroes(&address)) {
+               fprintf(stderr, "IP address isn't all zeroes\n");
+               return 1;
+       }
+
+       if (loc_address_all_ones(&address)) {
+               fprintf(stderr, "IP address is unexpectedly all ones\n");
+               return 1;
+       }
+
+       switch (family) {
+               case AF_INET6:
+                       e = "::";
+                       break;
+
+               case AF_INET:
+                       e = "0.0.0.0";
+                       break;
+       }
+
+       // Convert this to a string a few times
+       for (unsigned int i = 0; i < 100; i++) {
+               s = loc_address_str(&address);
+
+               printf("Iteration %u: %s\n", i, s);
+
+               if (strcmp(s, e) != 0) {
+                       fprintf(stderr, "IP address was formatted in an invalid format: %s\n", s);
+                       return 1;
+               }
+       }
+
+       // Increment the IP address
+       loc_address_increment(&address);
+
+       switch (family) {
+               case AF_INET6:
+                       e = "::1";
+                       break;
+
+               case AF_INET:
+                       e = "0.0.0.1";
+                       break;
+       }
+
+       s = loc_address_str(&address);
+
+       printf("Incremented IP address to %s\n", s);
+
+       if (strcmp(s, e) != 0) {
+               printf("IP address has been incremented incorrectly: %s\n", s);
+               return 1;
+       }
+
+       if (loc_address_all_zeroes(&address)) {
+               printf("IP address shouldn't be all zeroes any more\n");
+               return 1;
+       }
+
+       if (loc_address_all_ones(&address)) {
+               printf("IP address shouldn't be all ones any more\n");
+               return 1;
+       }
+
+       // Decrement the IP address
+       loc_address_decrement(&address);
+
+       s = loc_address_str(&address);
+
+       printf("Decremented IP address to %s\n", s);
+
+       if (!loc_address_all_zeroes(&address)) {
+               printf("IP address hasn't been decremented correctly: %s\n",
+                       loc_address_str(&address));
+               return 1;
+       }
+
+       return 0;
+}
+
+int main(int argc, char** argv) {
+       struct loc_ctx* ctx = NULL;
+       int r = EXIT_FAILURE;
+
+       int err = loc_new(&ctx);
+       if (err < 0)
+               exit(r);
+
+       // Enable debug logging
+       loc_set_log_priority(ctx, LOG_DEBUG);
+
+       // Perform all tests for IPv6
+       r = perform_tests(ctx, AF_INET6);
+       if (r)
+               goto ERROR;
+
+       // Perform all tests for IPv4
+       r = perform_tests(ctx, AF_INET);
+       if (r)
+               goto ERROR;
+
+ERROR:
+       loc_unref(ctx);
+
+       return r;
+}
index 2d61675f3866c04879cabad65a3e186d594432ae..b135c6b9ffd91d1369b1da94c99123c25a85ce0a 100644 (file)
@@ -19,9 +19,9 @@
 #include <string.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/database.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/database.h>
+#include <libloc/writer.h>
 
 #define TEST_AS_COUNT 5000
 
@@ -55,13 +55,13 @@ int main(int argc, char** argv) {
 
        FILE* f = tmpfile();
        if (!f) {
-               fprintf(stderr, "Could not open file for writing: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open file for writing: %m\n");
                exit(EXIT_FAILURE);
        }
 
        err = loc_writer_write(writer, f, LOC_DATABASE_VERSION_UNSET);
        if (err) {
-               fprintf(stderr, "Could not write database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not write database: %m\n");
                exit(EXIT_FAILURE);
        }
 
@@ -71,7 +71,7 @@ int main(int argc, char** argv) {
        struct loc_database* db;
        err = loc_database_new(ctx, &db, f);
        if (err) {
-               fprintf(stderr, "Could not open database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not open database: %m\n");
                exit(EXIT_FAILURE);
        }
 
index d78c773b07a7a18bbeb0ae2fd6f6f21037c58c4c..f9db204c2a6183dc80d575ad5f727f2f2b199def 100644 (file)
 #include <string.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/country.h>
-#include <loc/database.h>
-#include <loc/network.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/country.h>
+#include <libloc/database.h>
+#include <libloc/network.h>
+#include <libloc/writer.h>
 
 int main(int argc, char** argv) {
        struct loc_country* country;
+       int flag;
        int err;
 
        // Check some valid country codes
-       if (!loc_country_code_is_valid("XX")) {
-               fprintf(stderr, "Valid country code detected as invalid: %s\n", "XX");
+       if (!loc_country_code_is_valid("DE")) {
+               fprintf(stderr, "Valid country code detected as invalid: %s\n", "DE");
                exit(EXIT_FAILURE);
        }
 
@@ -43,6 +44,48 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
        }
 
+       // Test special country codes
+       flag = loc_country_special_code_to_flag("XX");
+       if (flag) {
+               fprintf(stderr, "Unexpectedly received a flag for XX: %d\n", flag);
+               exit(EXIT_FAILURE);
+       }
+
+       // A1
+       flag = loc_country_special_code_to_flag("A1");
+       if (flag != LOC_NETWORK_FLAG_ANONYMOUS_PROXY) {
+               fprintf(stderr, "Got a wrong flag for A1: %d\n", flag);
+               exit(EXIT_FAILURE);
+       }
+
+       // A2
+       flag = loc_country_special_code_to_flag("A2");
+       if (flag != LOC_NETWORK_FLAG_SATELLITE_PROVIDER) {
+               fprintf(stderr, "Got a wrong flag for A2: %d\n", flag);
+               exit(EXIT_FAILURE);
+       }
+
+       // A3
+       flag = loc_country_special_code_to_flag("A3");
+       if (flag != LOC_NETWORK_FLAG_ANYCAST) {
+               fprintf(stderr, "Got a wrong flag for A3: %d\n", flag);
+               exit(EXIT_FAILURE);
+       }
+
+       // XD
+       flag = loc_country_special_code_to_flag("XD");
+       if (flag != LOC_NETWORK_FLAG_DROP) {
+               fprintf(stderr, "Got a wrong flag for XD: %d\n", flag);
+               exit(EXIT_FAILURE);
+       }
+
+       // NULL input
+       flag = loc_country_special_code_to_flag(NULL);
+       if (flag >= 0) {
+               fprintf(stderr, "loc_country_special_code_to_flag didn't throw an error for NULL\n");
+               exit(EXIT_FAILURE);
+       }
+
        struct loc_ctx* ctx;
        err = loc_new(&ctx);
        if (err < 0)
@@ -58,7 +101,7 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
 
        // Create a country
-       err = loc_writer_add_country(writer, &country, "XX");
+       err = loc_writer_add_country(writer, &country, "DE");
        if (err) {
                fprintf(stderr, "Could not create country\n");
                exit(EXIT_FAILURE);
@@ -81,13 +124,13 @@ int main(int argc, char** argv) {
 
        FILE* f = tmpfile();
        if (!f) {
-               fprintf(stderr, "Could not open file for writing: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open file for writing: %m\n");
                exit(EXIT_FAILURE);
        }
 
        err = loc_writer_write(writer, f, LOC_DATABASE_VERSION_UNSET);
        if (err) {
-               fprintf(stderr, "Could not write database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not write database: %m\n");
                exit(EXIT_FAILURE);
        }
        loc_writer_unref(writer);
@@ -96,18 +139,54 @@ int main(int argc, char** argv) {
        struct loc_database* db;
        err = loc_database_new(ctx, &db, f);
        if (err) {
-               fprintf(stderr, "Could not open database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not open database: %m\n");
                exit(EXIT_FAILURE);
        }
 
        // Lookup an address in the subnet
        err = loc_database_get_country(db, &country, "YY");
-       if (err) {
+       if (err || !country) {
                fprintf(stderr, "Could not find country: YY\n");
                exit(EXIT_FAILURE);
        }
        loc_country_unref(country);
 
+       struct loc_network* network = NULL;
+
+       // Create a test network
+       err = loc_network_new_from_string(ctx, &network, "2001:db8::/64");
+       if (err) {
+               fprintf(stderr, "Could not create network: %m\n");
+               exit(EXIT_FAILURE);
+       }
+
+       // Set country code & flag
+       loc_network_set_country_code(network, "YY");
+       loc_network_set_flag(network, LOC_NETWORK_FLAG_ANONYMOUS_PROXY);
+
+       // Check if this network matches its own country code
+       err = loc_network_matches_country_code(network, "YY");
+       if (!err) {
+               fprintf(stderr, "Network does not match its own country code\n");
+               exit(EXIT_FAILURE);
+       }
+
+       // Check if this network matches the special country code
+       err = loc_network_matches_country_code(network, "A1");
+       if (!err) {
+               fprintf(stderr, "Network does not match the special country code A1\n");
+               exit(EXIT_FAILURE);
+       }
+
+       // Check if this network does not match another special country code
+       err = loc_network_matches_country_code(network, "A2");
+       if (err) {
+               fprintf(stderr, "Network matches another special country code A2\n");
+               exit(EXIT_FAILURE);
+       }
+
+       loc_network_unref(network);
+
        loc_database_unref(db);
        loc_unref(ctx);
        fclose(f);
index da4b11c9faeffdd8a2afc6c38b9c0f12b976ef96..8ba558a3eb4c82b34c0cfbb8787b5215c4613889 100644 (file)
@@ -24,9 +24,9 @@
 #include <unistd.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/database.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/database.h>
+#include <libloc/writer.h>
 
 const char* VENDOR = "Test Vendor";
 const char* DESCRIPTION =
@@ -167,13 +167,13 @@ int main(int argc, char** argv) {
 
        FILE* f = tmpfile();
        if (!f) {
-               fprintf(stderr, "Could not open file for writing: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open file for writing: %m\n");
                exit(EXIT_FAILURE);
        }
 
        err = loc_writer_write(writer, f, LOC_DATABASE_VERSION_UNSET);
        if (err) {
-               fprintf(stderr, "Could not write database: %s\n", strerror(err));
+               fprintf(stderr, "Could not write database: %m\n");
                exit(EXIT_FAILURE);
        }
        loc_writer_unref(writer);
@@ -182,7 +182,7 @@ int main(int argc, char** argv) {
        struct loc_database* db;
        err = loc_database_new(ctx, &db, f);
        if (err) {
-               fprintf(stderr, "Could not open database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not open database: %m\n");
                exit(EXIT_FAILURE);
        }
 
@@ -215,9 +215,8 @@ int main(int argc, char** argv) {
                if (!network)
                        break;
 
-               char* s = loc_network_str(network);
+               const char* s = loc_network_str(network);
                printf("Got network: %s\n", s);
-               free(s);
        }
 
        // Free the enumerator
index e8c2ebf558ea3ccd0872446b339480533687777f..41512e1bf232b573f39dfff8be030614925c0ae4 100644 (file)
@@ -24,7 +24,7 @@
 #include <unistd.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
+#include <libloc/libloc.h>
 
 int main(int argc, char** argv) {
        struct loc_ctx *ctx;
index 6f32ff7ba1e7da4d1b4848f30b08c930b96cf03b..70a6b89bdee916c529ea01f245fe0446b1ddb4cb 100644 (file)
@@ -21,9 +21,9 @@
 #include <string.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/network.h>
-#include <loc/network-list.h>
+#include <libloc/libloc.h>
+#include <libloc/network.h>
+#include <libloc/network-list.h>
 
 int main(int argc, char** argv) {
        int err;
index dde13f1582dd32c662f6e9e722de7f849eca1af6..69544e29628bc32febc981ba3fa1f9dbc8dd2a24 100644 (file)
 #include <string.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/database.h>
-#include <loc/network.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/address.h>
+#include <libloc/database.h>
+#include <libloc/network.h>
+#include <libloc/private.h>
+#include <libloc/writer.h>
+
+static int test_reverse_pointers(struct loc_ctx* ctx) {
+       const struct test {
+               const char* network;
+               const char* rp;
+       } tests[] = {
+               // IPv6
+               { "::1/128", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." },
+               { "2001:db8::/32", "*.8.b.d.0.1.0.0.2.ip6.arpa." },
+
+               // IPv4
+               { "10.0.0.0/32", "0.0.0.10.in-addr.arpa." },
+               { "10.0.0.0/24", "*.0.0.10.in-addr.arpa." },
+               { "10.0.0.0/16", "*.0.10.in-addr.arpa." },
+               { "10.0.0.0/8", "*.10.in-addr.arpa." },
+               { "10.0.0.0/0", "*.in-addr.arpa." },
+               { "10.0.0.0/1", NULL, },
+               { NULL, NULL },
+       };
+
+       struct loc_network* network = NULL;
+       char* rp = NULL;
+       int r;
+
+       for (const struct test* test = tests; test->network; test++) {
+               // Create a new network
+               r = loc_network_new_from_string(ctx, &network, test->network);
+               if (r)
+                       return r;
+
+               // Fetch the reverse pointer
+               rp = loc_network_reverse_pointer(network, NULL);
+
+               // No RP expected and got none
+               if (!test->rp && !rp)
+                       continue;
+
+               // Got a result when expecting none
+               else if (!test->rp && rp) {
+                       fprintf(stderr, "Got an RP for %s when expecting none\n", test->network);
+                       return EXIT_FAILURE;
+
+               // Got nothing when expecting a result
+               } else if (test->rp && !rp) {
+                       fprintf(stderr, "Got no RP for %s when expecting one\n", test->network);
+                       return EXIT_FAILURE;
+
+               // Compare values
+               } else if (strcmp(test->rp, rp) != 0) {
+                       fprintf(stderr, "Got an unexpected RP for %s: Got %s, expected %s\n",
+                               test->network, rp, test->rp);
+                       return EXIT_FAILURE;
+               }
+
+               loc_network_unref(network);
+               if (rp)
+                       free(rp);
+       }
+
+       return 0;
+}
 
 int main(int argc, char** argv) {
        int err;
@@ -62,7 +125,7 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
        }
 
-       err = loc_network_set_country_code(network1, "XX");
+       err = loc_network_set_country_code(network1, "DE");
        if (err) {
                fprintf(stderr, "Could not set country code\n");
                exit(EXIT_FAILURE);
@@ -78,7 +141,7 @@ int main(int argc, char** argv) {
 #endif
 
        // Check if the first and last addresses are correct
-       char* string = loc_network_format_first_address(network1);
+       const char* string = loc_network_format_first_address(network1);
        if (!string) {
                fprintf(stderr, "Did get NULL instead of a string for the first address\n");
                exit(EXIT_FAILURE);
@@ -100,7 +163,7 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
        }
 
-       err = loc_network_match_address(network1, &address);
+       err = loc_network_matches_address(network1, &address);
        if (!err) {
                fprintf(stderr, "Network1 does not match address\n");
                exit(EXIT_FAILURE);
@@ -113,7 +176,7 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
        }
 
-       err = loc_network_set_country_code(network2, "XY");
+       err = loc_network_set_country_code(network2, "DE");
        if (err) {
                fprintf(stderr, "Could not set country code\n");
                exit(EXIT_FAILURE);
@@ -174,13 +237,11 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
        }
 
-       char* s = loc_network_str(subnet1);
+       const char* s = loc_network_str(subnet1);
        printf("Received subnet1 = %s\n", s);
-       free(s);
 
        s = loc_network_str(subnet2);
        printf("Received subnet2 = %s\n", s);
-       free(s);
 
        if (!loc_network_is_subnet(network1, subnet1)) {
                fprintf(stderr, "Subnet1 is not a subnet\n");
@@ -246,7 +307,7 @@ int main(int argc, char** argv) {
        // Try adding an invalid network
        struct loc_network* network;
        err = loc_writer_add_network(writer, &network, "xxxx:xxxx::/32");
-       if (err != -EINVAL) {
+       if (!err) {
                fprintf(stderr, "It was possible to add an invalid network (err = %d)\n", err);
                exit(EXIT_FAILURE);
        }
@@ -258,22 +319,15 @@ int main(int argc, char** argv) {
                exit(EXIT_FAILURE);
        }
 
-       // Try adding localhost
-       err = loc_writer_add_network(writer, &network, "::1/128");
-       if (err != -EINVAL) {
-               fprintf(stderr, "It was possible to add localhost (::1/128): %d\n", err);
-               exit(EXIT_FAILURE);
-       }
-
        FILE* f = tmpfile();
        if (!f) {
-               fprintf(stderr, "Could not open file for writing: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open file for writing: %m\n");
                exit(EXIT_FAILURE);
        }
 
        err = loc_writer_write(writer, f, LOC_DATABASE_VERSION_UNSET);
        if (err) {
-               fprintf(stderr, "Could not write database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not write database: %m\n");
                exit(EXIT_FAILURE);
        }
        loc_writer_unref(writer);
@@ -291,7 +345,7 @@ int main(int argc, char** argv) {
        struct loc_database* db;
        err = loc_database_new(ctx, &db, f);
        if (err) {
-               fprintf(stderr, "Could not open database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not open database: %m\n");
                exit(EXIT_FAILURE);
        }
 
@@ -309,7 +363,44 @@ int main(int argc, char** argv) {
                fprintf(stderr, "Could look up 2001:db8:fffe:1::, but I shouldn't\n");
                exit(EXIT_FAILURE);
        }
-       loc_network_unref(network1);
+
+       const struct bit_length_test {
+               const char* network;
+               unsigned int bit_length;
+       } bit_length_tests[] = {
+               { "::/0", 0 },
+               { "2001::/128", 16 },
+               { "1.0.0.0/32", 8 },
+               { "0.0.0.1/32", 32 },
+               { "255.255.255.255/32", 32 },
+               { NULL, 0, },
+       };
+
+       for (const struct bit_length_test* t = bit_length_tests; t->network; t++) {
+               err = loc_network_new_from_string(ctx, &network1, t->network);
+               if (err) {
+                       fprintf(stderr, "Could not create network %s: %m\n", t->network);
+                       exit(EXIT_FAILURE);
+               }
+
+               const struct in6_addr* addr = loc_network_get_first_address(network1);
+
+               unsigned int bit_length = loc_address_bit_length(addr);
+
+               if (bit_length != t->bit_length) {
+                       printf("Bit length of %s didn't match: %u != %u\n",
+                               t->network, t->bit_length, bit_length);
+                       loc_network_unref(network1);
+                       exit(EXIT_FAILURE);
+               }
+
+               loc_network_unref(network1);
+       }
+
+       // Test reverse pointers
+       err = test_reverse_pointers(ctx);
+       if (err)
+               exit(err);
 
        loc_unref(ctx);
        fclose(f);
index 620517c922469708976f5afe41398043c0d92758..e1be5b1889122d6b4ff9f2ad51eeb135307aaeb5 100644 (file)
@@ -24,9 +24,9 @@
 #include <unistd.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/database.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/database.h>
+#include <libloc/writer.h>
 
 int main(int argc, char** argv) {
        int err;
@@ -34,20 +34,20 @@ int main(int argc, char** argv) {
        // Open public key
        FILE* public_key = fopen(ABS_SRCDIR "/examples/public-key.pem", "r");
        if (!public_key) {
-               fprintf(stderr, "Could not open public key file: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open public key file: %m\n");
                exit(EXIT_FAILURE);
        }
 
        // Open private key
        FILE* private_key1 = fopen(ABS_SRCDIR "/examples/private-key.pem", "r");
        if (!private_key1) {
-               fprintf(stderr, "Could not open private key file: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open private key file: %m\n");
                exit(EXIT_FAILURE);
        }
 
        FILE* private_key2 = fopen(ABS_SRCDIR "/examples/private-key.pem", "r");
        if (!private_key2) {
-               fprintf(stderr, "Could not open private key file: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open private key file: %m\n");
                exit(EXIT_FAILURE);
        }
 
@@ -67,13 +67,13 @@ int main(int argc, char** argv) {
 
        FILE* f = tmpfile();
        if (!f) {
-               fprintf(stderr, "Could not open file for writing: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open file for writing: %m\n");
                exit(EXIT_FAILURE);
        }
 
        err = loc_writer_write(writer, f, LOC_DATABASE_VERSION_UNSET);
        if (err) {
-               fprintf(stderr, "Could not write database: %s\n", strerror(err));
+               fprintf(stderr, "Could not write database: %m\n");
                exit(EXIT_FAILURE);
        }
        loc_writer_unref(writer);
@@ -82,7 +82,7 @@ int main(int argc, char** argv) {
        struct loc_database* db;
        err = loc_database_new(ctx, &db, f);
        if (err) {
-               fprintf(stderr, "Could not open database: %s\n", strerror(-err));
+               fprintf(stderr, "Could not open database: %m\n");
                exit(EXIT_FAILURE);
        }
 
@@ -94,9 +94,9 @@ int main(int argc, char** argv) {
        }
 
        // Open another public key
-       public_key = freopen(ABS_SRCDIR "/src/signing-key.pem", "r", public_key);
+       public_key = freopen(ABS_SRCDIR "/data/signing-key.pem", "r", public_key);
        if (!public_key) {
-               fprintf(stderr, "Could not open public key file: %s\n", strerror(errno));
+               fprintf(stderr, "Could not open public key file: %m\n");
                exit(EXIT_FAILURE);
        }
 
index 3c2890a0ae9fe7f599e61a99b336b224b24fe5c6..a94d8f8bd07815e65f6024ee6f5dc4f3d0c4c54b 100644 (file)
@@ -26,8 +26,8 @@
 #include <time.h>
 #include <syslog.h>
 
-#include <loc/libloc.h>
-#include <loc/stringpool.h>
+#include <libloc/libloc.h>
+#include <libloc/stringpool.h>
 
 static const char* characters = "012345789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
 
@@ -74,7 +74,7 @@ int main(int argc, char** argv) {
        // Append a string
        off_t pos = loc_stringpool_add(pool, "ABC");
        if (pos < 0) {
-               fprintf(stderr, "Could not add string: %s\n", strerror(-pos));
+               fprintf(stderr, "Could not add string: %m\n");
                exit(EXIT_FAILURE);
        }
 
@@ -108,7 +108,7 @@ int main(int argc, char** argv) {
                free(string);
 
                if (pos < 0) {
-                       fprintf(stderr, "Could not add string %d: %s\n", i, strerror(-pos));
+                       fprintf(stderr, "Could not add string %d: %m\n", i);
                        exit(EXIT_FAILURE);
                }
        }
index c61a6dfc657e92c1f552207f9af81cf671cb38da..13948c282dbd199bd8fc7417c8f06d5a24c1071a 100644 (file)
 #include <openssl/evp.h>
 #include <openssl/pem.h>
 
-#include <loc/libloc.h>
-#include <loc/as.h>
-#include <loc/compat.h>
-#include <loc/country.h>
-#include <loc/database.h>
-#include <loc/format.h>
-#include <loc/network.h>
-#include <loc/private.h>
-#include <loc/writer.h>
+#include <libloc/libloc.h>
+#include <libloc/as.h>
+#include <libloc/as-list.h>
+#include <libloc/compat.h>
+#include <libloc/country.h>
+#include <libloc/country-list.h>
+#include <libloc/database.h>
+#include <libloc/format.h>
+#include <libloc/network.h>
+#include <libloc/network-tree.h>
+#include <libloc/private.h>
+#include <libloc/writer.h>
 
 struct loc_writer {
        struct loc_ctx* ctx;
@@ -60,13 +63,10 @@ struct loc_writer {
        char signature2[LOC_SIGNATURE_MAX_LENGTH];
        size_t signature2_length;
 
-       struct loc_as** as;
-       size_t as_count;
-
-       struct loc_country** countries;
-       size_t countries_count;
-
        struct loc_network_tree* networks;
+
+       struct loc_as_list* as_list;
+       struct loc_country_list* country_list;
 };
 
 static int parse_private_key(struct loc_writer* writer, EVP_PKEY** private_key, FILE* f) {
@@ -92,7 +92,7 @@ LOC_EXPORT int loc_writer_new(struct loc_ctx* ctx, struct loc_writer** writer,
                FILE* fkey1, FILE* fkey2) {
        struct loc_writer* w = calloc(1, sizeof(*w));
        if (!w)
-               return -ENOMEM;
+               return 1;
 
        w->ctx = loc_ref(ctx);
        w->refcount = 1;
@@ -103,6 +103,13 @@ LOC_EXPORT int loc_writer_new(struct loc_ctx* ctx, struct loc_writer** writer,
                return r;
        }
 
+       // Add an empty string to the stringpool
+       r = loc_stringpool_add(w->pool, "");
+       if (r) {
+               loc_writer_unref(w);
+               return r;
+       }
+
        // Initialize the network tree
        r = loc_network_tree_new(ctx, &w->networks);
        if (r) {
@@ -110,6 +117,20 @@ LOC_EXPORT int loc_writer_new(struct loc_ctx* ctx, struct loc_writer** writer,
                return r;
        }
 
+       // Initialize AS list
+       r = loc_as_list_new(ctx, &w->as_list);
+       if (r) {
+               loc_writer_unref(w);
+               return r;
+       }
+
+       // Initialize countries list
+       r = loc_country_list_new(ctx, &w->country_list);
+       if (r) {
+               loc_writer_unref(w);
+               return r;
+       }
+
        // Load the private keys to sign databases
        if (fkey1) {
                r = parse_private_key(w, &w->private_key1, fkey1);
@@ -147,27 +168,20 @@ static void loc_writer_free(struct loc_writer* writer) {
                EVP_PKEY_free(writer->private_key2);
 
        // Unref all AS
-       if (writer->as) {
-               for (unsigned int i = 0; i < writer->as_count; i++) {
-                       loc_as_unref(writer->as[i]);
-               }
-               free(writer->as);
-       }
+       if (writer->as_list)
+               loc_as_list_unref(writer->as_list);
 
        // Unref all countries
-       if (writer->countries) {
-               for (unsigned int i = 0; i < writer->countries_count; i++) {
-                       loc_country_unref(writer->countries[i]);
-               }
-               free(writer->countries);
-       }
+       if (writer->country_list)
+               loc_country_list_unref(writer->country_list);
 
        // Release network tree
        if (writer->networks)
                loc_network_tree_unref(writer->networks);
 
        // Unref the string pool
-       loc_stringpool_unref(writer->pool);
+       if (writer->pool)
+               loc_stringpool_unref(writer->pool);
 
        loc_unref(writer->ctx);
        free(writer);
@@ -224,30 +238,14 @@ LOC_EXPORT int loc_writer_set_license(struct loc_writer* writer, const char* lic
        return 0;
 }
 
-static int __loc_as_cmp(const void* as1, const void* as2) {
-       return loc_as_cmp(*(struct loc_as**)as1, *(struct loc_as**)as2);
-}
-
 LOC_EXPORT int loc_writer_add_as(struct loc_writer* writer, struct loc_as** as, uint32_t number) {
+       // Create a new AS object
        int r = loc_as_new(writer->ctx, as, number);
        if (r)
                return r;
 
-       // We have a new AS to add
-       writer->as_count++;
-
-       // Make space
-       writer->as = realloc(writer->as, sizeof(*writer->as) * writer->as_count);
-       if (!writer->as)
-               return -ENOMEM;
-
-       // Add as last element
-       writer->as[writer->as_count - 1] = loc_as_ref(*as);
-
-       // Sort everything
-       qsort(writer->as, writer->as_count, sizeof(*writer->as), __loc_as_cmp);
-
-       return 0;
+       // Append it to the list
+       return loc_as_list_append(writer->as_list, *as);
 }
 
 LOC_EXPORT int loc_writer_add_network(struct loc_writer* writer, struct loc_network** network, const char* string) {
@@ -262,30 +260,14 @@ LOC_EXPORT int loc_writer_add_network(struct loc_writer* writer, struct loc_netw
        return loc_network_tree_add_network(writer->networks, *network);
 }
 
-static int __loc_country_cmp(const void* country1, const void* country2) {
-       return loc_country_cmp(*(struct loc_country**)country1, *(struct loc_country**)country2);
-}
-
 LOC_EXPORT int loc_writer_add_country(struct loc_writer* writer, struct loc_country** country, const char* country_code) {
+       // Allocate a new country
        int r = loc_country_new(writer->ctx, country, country_code);
        if (r)
                return r;
 
-       // We have a new country to add
-       writer->countries_count++;
-
-       // Make space
-       writer->countries = realloc(writer->countries, sizeof(*writer->countries) * writer->countries_count);
-       if (!writer->countries)
-               return -ENOMEM;
-
-       // Add as last element
-       writer->countries[writer->countries_count - 1] = loc_country_ref(*country);
-
-       // Sort everything
-       qsort(writer->countries, writer->countries_count, sizeof(*writer->countries), __loc_country_cmp);
-
-       return 0;
+       // Append it to the list
+       return loc_country_list_append(writer->country_list, *country);
 }
 
 static void make_magic(struct loc_writer* writer, struct loc_database_magic* magic,
@@ -325,20 +307,32 @@ static int loc_database_write_as_section(struct loc_writer* writer,
        DEBUG(writer->ctx, "AS section starts at %jd bytes\n", (intmax_t)*offset);
        header->as_offset = htobe32(*offset);
 
-       size_t as_length = 0;
+       // Sort the AS list first
+       loc_as_list_sort(writer->as_list);
+
+       const size_t as_count = loc_as_list_size(writer->as_list);
+
+       struct loc_database_as_v1 block;
+       size_t block_length = 0;
+
+       for (unsigned int i = 0; i < as_count; i++) {
+               struct loc_as* as = loc_as_list_get(writer->as_list, i);
+               if (!as)
+                       return 1;
 
-       struct loc_database_as_v1 as;
-       for (unsigned int i = 0; i < writer->as_count; i++) {
                // Convert AS into database format
-               loc_as_to_database_v1(writer->as[i], writer->pool, &as);
+               loc_as_to_database_v1(as, writer->pool, &block);
 
                // Write to disk
-               *offset += fwrite(&as, 1, sizeof(as), f);
-               as_length += sizeof(as);
+               *offset += fwrite(&block, 1, sizeof(block), f);
+               block_length += sizeof(block);
+
+               // Unref AS
+               loc_as_unref(as);
        }
 
-       DEBUG(writer->ctx, "AS section has a length of %zu bytes\n", as_length);
-       header->as_length = htobe32(as_length);
+       DEBUG(writer->ctx, "AS section has a length of %zu bytes\n", block_length);
+       header->as_length = htobe32(block_length);
 
        align_page_boundary(offset, f);
 
@@ -396,6 +390,8 @@ static void free_network(struct network* network) {
 
 static int loc_database_write_networks(struct loc_writer* writer,
                struct loc_database_header_v1* header, off_t* offset, FILE* f) {
+       int r;
+
        // Write the network tree
        DEBUG(writer->ctx, "Network tree starts at %jd bytes\n", (intmax_t)*offset);
        header->network_tree_offset = htobe32(*offset);
@@ -420,9 +416,16 @@ static int loc_database_write_networks(struct loc_writer* writer,
        TAILQ_HEAD(network_t, network) networks;
        TAILQ_INIT(&networks);
 
+       // Cleanup the tree before writing it
+       r = loc_network_tree_cleanup(writer->networks);
+       if (r)
+               return r;
+
        // Add root
        struct loc_network_tree_node* root = loc_network_tree_get_root(writer->networks);
        node = make_node(root);
+       if (!node)
+               return 1;
 
        TAILQ_INSERT_TAIL(&nodes, node, nodes);
 
@@ -463,6 +466,10 @@ static int loc_database_write_networks(struct loc_writer* writer,
 
                        // Append network to be written out later
                        struct network* nw = make_network(network);
+                       if (!nw) {
+                               free_node(node);
+                               return 1;
+                       }
                        TAILQ_INSERT_TAIL(&networks, nw, networks);
 
                        db_node.network = htobe32(network_index++);
@@ -497,7 +504,7 @@ static int loc_database_write_networks(struct loc_writer* writer,
                TAILQ_REMOVE(&networks, nw, networks);
 
                // Prepare what we are writing to disk
-               int r = loc_network_to_database_v1(nw->network, &db_network);
+               r = loc_network_to_database_v1(nw->network, &db_network);
                if (r)
                        return r;
 
@@ -519,20 +526,24 @@ static int loc_database_write_countries(struct loc_writer* writer,
        DEBUG(writer->ctx, "Countries section starts at %jd bytes\n", (intmax_t)*offset);
        header->countries_offset = htobe32(*offset);
 
-       size_t countries_length = 0;
+       const size_t countries_count = loc_country_list_size(writer->country_list);
+
+       struct loc_database_country_v1 block;
+       size_t block_length = 0;
+
+       for (unsigned int i = 0; i < countries_count; i++) {
+               struct loc_country* country = loc_country_list_get(writer->country_list, i);
 
-       struct loc_database_country_v1 country;
-       for (unsigned int i = 0; i < writer->countries_count; i++) {
                // Convert country into database format
-               loc_country_to_database_v1(writer->countries[i], writer->pool, &country);
+               loc_country_to_database_v1(country, writer->pool, &block);
 
                // Write to disk
-               *offset += fwrite(&country, 1, sizeof(country), f);
-               countries_length += sizeof(country);
+               *offset += fwrite(&block, 1, sizeof(block), f);
+               block_length += sizeof(block);
        }
 
-       DEBUG(writer->ctx, "Countries section has a length of %zu bytes\n", countries_length);
-       header->countries_length = htobe32(countries_length);
+       DEBUG(writer->ctx, "Countries section has a length of %zu bytes\n", block_length);
+       header->countries_length = htobe32(block_length);
 
        align_page_boundary(offset, f);
 
@@ -542,6 +553,8 @@ static int loc_database_write_countries(struct loc_writer* writer,
 static int loc_writer_create_signature(struct loc_writer* writer,
                struct loc_database_header_v1* header, FILE* f, EVP_PKEY* private_key,
                char* signature, size_t* length) {
+       size_t bytes_read = 0;
+
        DEBUG(writer->ctx, "Creating signature...\n");
 
        // Read file from the beginning
@@ -559,7 +572,12 @@ static int loc_writer_create_signature(struct loc_writer* writer,
 
        // Read magic
        struct loc_database_magic magic;
-       fread(&magic, 1, sizeof(magic), f);
+       bytes_read = fread(&magic, 1, sizeof(magic), f);
+       if (bytes_read < sizeof(magic)) {
+               ERROR(writer->ctx, "Could not read header: %m\n");
+               r = 1;
+               goto END;
+       }
 
        hexdump(writer->ctx, &magic, sizeof(magic));
 
@@ -585,11 +603,11 @@ static int loc_writer_create_signature(struct loc_writer* writer,
        // Walk through the file in chunks of 64kB
        char buffer[64 * 1024];
        while (!feof(f)) {
-               size_t bytes_read = fread(buffer, 1, sizeof(buffer), f);
+               bytes_read = fread(buffer, 1, sizeof(buffer), f);
 
                if (ferror(f)) {
-                       ERROR(writer->ctx, "Error reading from file: %s\n", strerror(errno));
-                       r = errno;
+                       ERROR(writer->ctx, "Error reading from file: %m\n");
+                       r = 1;
                        goto END;
                }
 
@@ -737,7 +755,7 @@ LOC_EXPORT int loc_writer_write(struct loc_writer* writer, FILE* f, enum loc_dat
 
        if (writer->signature2_length) {
                DEBUG(writer->ctx, "Copying second signature of %zu byte(s)\n",
-                       writer->signature1_length);
+                       writer->signature2_length);
 
                memcpy(header.signature2, writer->signature2, writer->signature2_length);
                header.signature2_length = htobe16(writer->signature2_length);
@@ -750,5 +768,8 @@ LOC_EXPORT int loc_writer_write(struct loc_writer* writer, FILE* f, enum loc_dat
 
        fwrite(&header, 1, sizeof(header), f);
 
+       // Flush everything
+       fflush(f);
+
        return r;
 }
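The rewritten writer keeps ASes and countries in loc_as_list and loc_country_list objects, sorts the AS list only when its section is serialized, cleans up the network tree via loc_network_tree_cleanup() before writing (the behaviour exercised by the new deduplication tests below), and flushes the file at the end. Through the Python bindings, the write path that drives this code looks roughly like the following minimal sketch; it reuses only Writer attributes and methods that appear in the tests and tools added in this commit, and the ASN, country and prefix values are made up:

    import location

    # Minimal sketch of the writer path changed above; attribute and method
    # names are taken from tests/python/networks-dedup.py and tools/copy.py.
    w = location.Writer()
    w.vendor = "Example"

    # ASes and countries are collected in lists; the AS list is sorted when written
    a = w.add_as(64512)
    a.name = "Example AS"

    c = w.add_country("DE")
    c.name = "Germany"

    # Networks go into the network tree, which is cleaned up before serialization
    n = w.add_network("10.0.0.0/8")
    n.country_code = "DE"
    n.asn = 64512

    # Serializes all sections and flushes the file
    w.write("example.db")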
diff --git a/tests/lua/main.lua b/tests/lua/main.lua
new file mode 100755 (executable)
index 0000000..e139b2d
--- /dev/null
@@ -0,0 +1,183 @@
+#!/usr/bin/lua
+--[[###########################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2024 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+############################################################################--]]
+
+luaunit = require("luaunit")
+
+ENV_TEST_DATABASE    = os.getenv("TEST_DATABASE")
+ENV_TEST_SIGNING_KEY = os.getenv("TEST_SIGNING_KEY")
+
+function test_load()
+       -- Try loading the module
+       location = require("location")
+
+       -- Print the version
+       print(location.version())
+end
+
+function test_open_database()
+       location = require("location")
+
+       -- Open the database
+       db = location.Database.open(ENV_TEST_DATABASE)
+
+       -- Verify
+       luaunit.assertIsTrue(db:verify(ENV_TEST_SIGNING_KEY))
+
+       -- Description
+       luaunit.assertIsString(db:get_description())
+
+       -- License
+       luaunit.assertIsString(db:get_license())
+       luaunit.assertEquals(db:get_license(), "CC BY-SA 4.0")
+
+       -- Vendor
+       luaunit.assertIsString(db:get_vendor())
+       luaunit.assertEquals(db:get_vendor(), "IPFire Project")
+end
+
+function test_lookup()
+       location = require("location")
+
+       -- Open the database
+       db = location.Database.open(ENV_TEST_DATABASE)
+
+       -- Perform a lookup
+       network1 = db:lookup("81.3.27.32")
+
+       luaunit.assertEquals(network1:get_family(), 2) -- AF_INET
+       luaunit.assertEquals(network1:get_country_code(), "DE")
+       luaunit.assertEquals(network1:get_asn(), 24679)
+
+       -- Lookup something else
+       network2 = db:lookup("8.8.8.8")
+       luaunit.assertIsTrue(network2:has_flag(location.NETWORK_FLAG_ANYCAST))
+       luaunit.assertIsFalse(network2:has_flag(location.NETWORK_FLAG_DROP))
+end
+
+function test_network()
+       location = require("location")
+
+       n1 = location.Network.new("10.0.0.0/8")
+
+       -- The ASN should be nil
+       luaunit.assertNil(n1:get_asn())
+
+       -- The family should be IPv4
+       luaunit.assertEquals(n1:get_family(), 2) -- AF_INET
+
+       -- The country code should be empty
+       luaunit.assertNil(n1:get_country_code())
+end
+
+function test_as()
+       location = require("location")
+
+       -- Create a new AS
+       as = location.AS.new(12345)
+       luaunit.assertEquals(as:get_number(), 12345)
+       luaunit.assertNil(as:get_name())
+
+       -- Reset
+       as = nil
+end
+
+function test_fetch_as()
+       location = require("location")
+
+       -- Open the database
+       db = location.Database.open(ENV_TEST_DATABASE)
+
+       -- Fetch an AS
+       as = db:get_as(0)
+
+       -- This should not exist
+       luaunit.assertNil(as)
+
+       -- Fetch something that exists
+       as = db:get_as(204867)
+       luaunit.assertEquals(as:get_number(), 204867)
+       luaunit.assertEquals(as:get_name(), "Lightning Wire Labs GmbH")
+end
+
+function test_country()
+       location = require("location")
+
+       c1 = location.Country.new("DE")
+       luaunit.assertEquals(c1:get_code(), "DE")
+       luaunit.assertNil(c1:get_name())
+       luaunit.assertNil(c1:get_continent_code())
+
+       c2 = location.Country.new("GB")
+       luaunit.assertNotEquals(c1, c2)
+
+       c1 = nil
+       c2 = nil
+end
+
+function test_fetch_country()
+       location = require("location")
+
+       -- Open the database
+       db = location.Database.open(ENV_TEST_DATABASE)
+
+       -- Fetch an invalid country
+       c = db:get_country("XX")
+       luaunit.assertNil(c)
+
+       -- Fetch something that exists
+       c = db:get_country("DE")
+       luaunit.assertEquals(c:get_code(), "DE")
+       luaunit.assertEquals(c:get_name(), "Germany")
+end
+
+-- This test is not very deterministic but should help to test the GC methods
+function test_gc()
+       print("GC: " .. collectgarbage("collect"))
+end
+
+function test_subnets()
+       location = require("location")
+
+       -- Open the database
+       db = location.Database.open(ENV_TEST_DATABASE)
+
+       local network = db:lookup("1.1.1.1")
+
+       local subnets = network:subnets()
+
+       luaunit.assertIsTable(subnets)
+       luaunit.assertEquals(#subnets, 2)
+
+       for i, subnet in ipairs(subnets) do
+               print(subnet)
+       end
+end
+
+function test_list_networks()
+       location = require("location")
+
+       -- Open the database
+       db = location.Database.open(ENV_TEST_DATABASE)
+
+       for network in db:list_networks() do
+               print(network, network:reverse_pointer())
+       end
+end
+
+os.exit(luaunit.LuaUnit.run())
diff --git a/tests/python/country.py b/tests/python/country.py
new file mode 100755 (executable)
index 0000000..d38d46a
--- /dev/null
@@ -0,0 +1,73 @@
+#!/usr/bin/python3
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2024 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+import location
+import unittest
+
+class Test(unittest.TestCase):
+       def test_properties(self):
+               c = location.Country("DE")
+
+               # The code should be DE
+               self.assertEqual(c.code, "DE")
+
+               # All other attributes should return None
+               self.assertIsNone(c.name)
+               self.assertIsNone(c.continent_code)
+
+               # Set a name and read it back
+               c.name = "Germany"
+               self.assertEqual(c.name, "Germany")
+
+               # Set a continent code and read it back
+               c.continent_code = "EU"
+               self.assertEqual(c.continent_code, "EU")
+
+       def test_country_cmp(self):
+               """
+                       Performs some comparison tests
+               """
+               c1 = location.Country("DE")
+               c2 = location.Country("DE")
+
+               # c1 and c2 should be equal
+               self.assertEqual(c1, c2)
+
+               # We cannot compare against strings for example
+               self.assertNotEqual(c1, "DE")
+
+               c3 = location.Country("AT")
+
+               # c1 and c3 should not be equal
+               self.assertNotEqual(c1, c3)
+
+               # c3 comes before c1 (alphabetically)
+               self.assertGreater(c1, c3)
+               self.assertLess(c3, c1)
+
+       def test_country_hash(self):
+               """
+                       Tests if the hash function works
+               """
+               c = location.Country("DE")
+
+               self.assertTrue(hash(c))
+
+if __name__ == "__main__":
+       unittest.main()
diff --git a/tests/python/networks-dedup.py b/tests/python/networks-dedup.py
new file mode 100755 (executable)
index 0000000..5b78a4b
--- /dev/null
@@ -0,0 +1,165 @@
+#!/usr/bin/python3
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2024 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+import location
+import os
+import tempfile
+import unittest
+
+class Test(unittest.TestCase):
+       def setUp(self):
+               # Show even very large diffs
+               self.maxDiff = None
+
+       def __test(self, inputs, outputs=None):
+               """
+                       Takes a list of networks that are written to the database and
+                       compares the result with the second argument.
+               """
+               if outputs is None:
+                       outputs = [network for network, cc, asn in inputs]
+
+               with tempfile.NamedTemporaryFile() as f:
+                       w = location.Writer()
+
+                       # Add all inputs
+                       for network, cc, asn in inputs:
+                               n = w.add_network(network)
+
+                               # Add CC
+                               if cc:
+                                       n.country_code = cc
+
+                               # Add ASN
+                               if asn:
+                                       n.asn = asn
+
+                       # Write file
+                       w.write(f.name)
+
+                       # Re-open the database
+                       db = location.Database(f.name)
+
+                       # Check if the output matches what we expect
+                       self.assertCountEqual(
+                               outputs, ["%s" % network for network in db.networks],
+                       )
+
+       def test_dedup_simple(self):
+               """
+                       Creates a couple of redundant networks and expects fewer to be written
+               """
+               self.__test(
+                       (
+                               ("10.0.0.0/8", None, None),
+                               ("10.0.0.0/16", None, None),
+                               ("10.0.0.0/24", None, None),
+                       ),
+
+                       # Everything should be merged into the single /8
+                       ("10.0.0.0/8",),
+               )
+
+       def test_dedup_noop(self):
+               """
+                       Nothing should be changed here
+               """
+               networks = (
+                       ("10.0.0.0/8", None, None),
+                       ("20.0.0.0/8", None, None),
+                       ("30.0.0.0/8", None, None),
+                       ("40.0.0.0/8", None, None),
+                       ("50.0.0.0/8", None, None),
+                       ("60.0.0.0/8", None, None),
+                       ("70.0.0.0/8", None, None),
+                       ("80.0.0.0/8", None, None),
+                       ("90.0.0.0/8", None, None),
+               )
+
+               # The input should match the output
+               self.__test(networks)
+
+       def test_dedup_with_properties(self):
+               """
+                       A more complicated deduplication test where properties have been set
+               """
+               # Nothing should change here because of different countries
+               self.__test(
+                       (
+                               ("10.0.0.0/8",  "DE", None),
+                               ("10.0.0.0/16", "AT", None),
+                               ("10.0.0.0/24", "DE", None),
+                       ),
+               )
+
+               # Nothing should change here because of different ASNs
+               self.__test(
+                       (
+                               ("10.0.0.0/8",  None, 1000),
+                               ("10.0.0.0/16", None, 2000),
+                               ("10.0.0.0/24", None, 1000),
+                       ),
+               )
+
+               # Everything can be merged again
+               self.__test(
+                       (
+                               ("10.0.0.0/8",  "DE", 1000),
+                               ("10.0.0.0/16", "DE", 1000),
+                               ("10.0.0.0/24", "DE", 1000),
+                       ),
+                       ("10.0.0.0/8",),
+               )
+
+       def test_merge(self):
+               """
+                       Checks whether the merging algorithm works
+               """
+               self.__test(
+                       (
+                               ("10.0.0.0/9",   None, None),
+                               ("10.128.0.0/9", None, None),
+                       ),
+                       ("10.0.0.0/8",),
+               )
+
+       def test_bug13236(self):
+               self.__test(
+                       (
+                               ("209.38.0.0/16",   "US", None),
+                               ("209.38.1.0/24",   "US", 14061),
+                               ("209.38.160.0/22", "US", 14061),
+                               ("209.38.164.0/22", "US", 14061),
+                               ("209.38.168.0/22", "US", 14061),
+                               ("209.38.172.0/22", "US", 14061),
+                               ("209.38.176.0/20", "US", 14061),
+                               ("209.38.192.0/19", "US", 14061),
+                               ("209.38.224.0/19", "US", 14061),
+                       ),
+                       (
+                               "209.38.0.0/16",
+                               "209.38.1.0/24",
+                               "209.38.160.0/19",
+                               "209.38.192.0/18",
+                       ),
+               )
+
+
+if __name__ == "__main__":
+       unittest.main()
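The expected outputs in test_bug13236 follow from plain CIDR arithmetic: the four /22s at 209.38.160.0, 209.38.164.0, 209.38.168.0 and 209.38.172.0 collapse into 209.38.160.0/20, which merges with 209.38.176.0/20 into 209.38.160.0/19, while 209.38.192.0/19 and 209.38.224.0/19 merge into 209.38.192.0/18. The address arithmetic alone (not libloc's property-aware deduplication, which also compares country codes, ASNs and flags) can be reproduced with Python's standard ipaddress module:

    import ipaddress

    # Adjacent, equally-sized networks collapse into their common supernet.
    # This mirrors only the address arithmetic; libloc additionally requires
    # matching country codes, ASNs and flags before merging.
    nets = [
        ipaddress.ip_network(n) for n in (
            "209.38.160.0/22", "209.38.164.0/22", "209.38.168.0/22",
            "209.38.172.0/22", "209.38.176.0/20",
            "209.38.192.0/19", "209.38.224.0/19",
        )
    ]

    print([str(n) for n in ipaddress.collapse_addresses(nets)])
    # ['209.38.160.0/19', '209.38.192.0/18']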
diff --git a/tests/python/test-database.py b/tests/python/test-database.py
new file mode 100755 (executable)
index 0000000..1c3448b
--- /dev/null
@@ -0,0 +1,144 @@
+#!/usr/bin/python3
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2022 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+import location
+import os
+import unittest
+
+TEST_DATA_DIR = os.environ["TEST_DATA_DIR"]
+
+class Test(unittest.TestCase):
+       def setUp(self):
+               path = os.path.join(TEST_DATA_DIR, "database.db")
+
+               # Load the database
+               self.db = location.Database(path)
+
+       def test_metadata(self):
+               """
+                       Check whether the metadata matches what we expect
+               """
+               # Vendor
+               self.assertEqual(self.db.vendor, "IPFire Project")
+
+               # Description
+               self.assertEqual(self.db.description,
+                       "This database has been obtained from https://location.ipfire.org/\n\nFind the full license terms at https://creativecommons.org/licenses/by-sa/4.0/")
+
+               # License
+               self.assertEqual(self.db.license, "CC BY-SA 4.0")
+
+               # Created At
+               self.assertIsInstance(self.db.created_at, int)
+
+       def test_fetch_network(self):
+               """
+                       Try fetching some results that should exist
+               """
+               n = self.db.lookup("81.3.27.38")
+               self.assertIsInstance(n, location.Network)
+
+               n = self.db.lookup("1.1.1.1")
+               self.assertIsInstance(n, location.Network)
+
+               n = self.db.lookup("8.8.8.8")
+               self.assertIsInstance(n, location.Network)
+
+       def test_fetch_network_nonexistent(self):
+               """
+                       Try to fetch something that should not exist
+               """
+               n = self.db.lookup("255.255.255.255")
+               self.assertIsNone(n)
+
+       def test_fetch_network_invalid(self):
+               """
+                       Feed some invalid inputs into the lookup function
+               """
+               with self.assertRaises(ValueError):
+                       self.db.lookup("XXX")
+
+               with self.assertRaises(ValueError):
+                       self.db.lookup("455.455.455.455")
+
+       def test_verify(self):
+               """
+                       Verify the database
+               """
+               # Path to the signing key
+               path = os.path.join(TEST_DATA_DIR, "signing-key.pem")
+
+               # Try to verify with an invalid key
+               with self.assertRaises(TypeError):
+                       self.db.verify(None)
+
+               # Perform verification with the correct key
+               with open(path, "r") as f:
+                       self.assertTrue(self.db.verify(f))
+
+               # Perform verification with invalid keys
+               with open("/dev/null", "r") as f:
+                       self.assertFalse(self.db.verify(f))
+
+               with open("/dev/urandom", "r") as f:
+                       self.assertFalse(self.db.verify(f))
+
+       def test_search_as(self):
+               """
+                       Try to fetch an AS
+               """
+               # Fetch an existing AS
+               self.assertIsInstance(self.db.get_as(204867), location.AS)
+
+               # Fetch a non-existing AS
+               self.assertIsNone(self.db.get_as(0))
+
+               # Fetch an AS with a number that is out of range
+               with self.assertRaises(OverflowError):
+                       self.db.get_as(2**32 + 1)
+
+       def test_get_country(self):
+               """
+                       Try fetching a country
+               """
+               # Fetch an existing country
+               self.assertIsInstance(self.db.get_country("DE"), location.Country)
+
+               # Fetch a non-existing country
+               self.assertIsNone(self.db.get_country("AA"))
+
+               # Fetch a country with an invalid country code
+               with self.assertRaises(ValueError):
+                       self.db.get_country("XXX")
+
+       def test_list_bogons(self):
+               """
+                       Generate a list of bogons
+               """
+               # Fetch all bogons
+               bogons = self.db.list_bogons()
+
+               # We should have received an enumerator full of networks
+               self.assertIsInstance(bogons, location.DatabaseEnumerator)
+               for bogon in bogons:
+                       self.assertIsInstance(bogon, location.Network)
+
+
+if __name__ == "__main__":
+       unittest.main()
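Outside the unittest harness, the same verification flow can be reproduced directly. The sketch below is only an illustration: the paths are assumptions, and it uses just the Database calls exercised by the test above (verify(), lookup() and the network attributes):

    import location

    # A minimal sketch, assuming these paths exist on your system; verify()
    # and lookup() are the calls exercised by tests/python/test-database.py.
    db = location.Database("/var/lib/location/database.db")

    # Check the signature against the signing key before trusting the data
    with open("signing-key.pem") as f:
        if not db.verify(f):
            raise SystemExit("Database verification failed")

    # Look up a single address
    network = db.lookup("81.3.27.38")
    if network:
        print(network, network.country_code, network.asn)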
diff --git a/tests/python/test-export.py b/tests/python/test-export.py
new file mode 100755 (executable)
index 0000000..6921861
--- /dev/null
@@ -0,0 +1,53 @@
+#!/usr/bin/python3
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2022 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+import location
+import os
+import unittest
+
+TEST_DATA_DIR = os.environ["TEST_DATA_DIR"]
+
+class Test(unittest.TestCase):
+       def setUp(self):
+               path = os.path.join(TEST_DATA_DIR, "database.db")
+
+               # Load the database
+               self.db = location.Database(path)
+
+       def test_list_networks(self):
+               """
+                       Lists all available networks
+               """
+               for network in self.db.networks:
+                       print(network)
+
+       def test_list_networks_flattened(self):
+               """
+                       Lists all networks but flattened
+               """
+               for i, network in enumerate(self.db.networks_flattened):
+                       # Break after the first 1000 iterations
+                       if i >= 1000:
+                               break
+
+                       print(network)
+
+
+if __name__ == "__main__":
+       unittest.main()
diff --git a/tools/copy.py b/tools/copy.py
new file mode 100644 (file)
index 0000000..39129c0
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/python3
+###############################################################################
+#                                                                             #
+# libloc - A library to determine the location of someone on the Internet     #
+#                                                                             #
+# Copyright (C) 2024 IPFire Development Team <info@ipfire.org>                #
+#                                                                             #
+# This library is free software; you can redistribute it and/or               #
+# modify it under the terms of the GNU Lesser General Public                  #
+# License as published by the Free Software Foundation; either                #
+# version 2.1 of the License, or (at your option) any later version.          #
+#                                                                             #
+# This library is distributed in the hope that it will be useful,             #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU           #
+# Lesser General Public License for more details.                             #
+#                                                                             #
+###############################################################################
+
+import argparse
+
+import location
+from location.i18n import _
+
+flags = (
+       location.NETWORK_FLAG_ANONYMOUS_PROXY,
+       location.NETWORK_FLAG_SATELLITE_PROVIDER,
+       location.NETWORK_FLAG_ANYCAST,
+       location.NETWORK_FLAG_DROP,
+)
+
+def copy_all(db, writer):
+       # Copy vendor
+       if db.vendor:
+               writer.vendor = db.vendor
+
+       # Copy description
+       if db.description:
+               writer.description = db.description
+
+       # Copy license
+       if db.license:
+               writer.license = db.license
+
+       # Copy all ASes
+       for old in db.ases:
+               new = writer.add_as(old.number)
+               new.name = old.name
+
+       # Copy all networks
+       for old in db.networks:
+               new = writer.add_network("%s" % old)
+
+               # Copy country code
+               new.country_code = old.country_code
+
+               # Copy ASN
+               if old.asn:
+                       new.asn = old.asn
+
+               # Copy flags
+               for flag in flags:
+                       if old.has_flag(flag):
+                               new.set_flag(flag)
+
+       # Copy countries
+       for old in db.countries:
+               new = writer.add_country(old.code)
+
+               # Copy continent code
+               new.continent_code = old.continent_code
+
+               # Copy name
+               new.name = old.name
+
+def main():
+       """
+               Main Function
+       """
+       parser = argparse.ArgumentParser(
+               description=_("Copies a location database"),
+       )
+
+       # Input File
+       parser.add_argument("input-file", help=_("File to read"))
+
+       # Output File
+       parser.add_argument("output-file", help=_("File to write"))
+
+       # Parse arguments
+       args = parser.parse_args()
+
+       input_file  = getattr(args, "input-file")
+       output_file = getattr(args, "output-file")
+
+       # Open the database
+       db = location.Database(input_file)
+
+       # Create a new writer
+       writer = location.Writer()
+
+       # Copy everything
+       copy_all(db, writer)
+
+       # Write the new file
+       writer.write(output_file)
+
+main()
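As added here, the script expects the source and target database as positional arguments (python3 tools/copy.py input.db output.db) and copies everything via copy_all(). The same building blocks also allow a filtered copy; the sketch below is only an example (file names are made up) that keeps DROP-listed networks and uses only calls that appear in this commit:

    import location

    # Sketch of a filtered copy: keep nothing but networks carrying the DROP
    # flag. File names are examples; the calls mirror copy_all() above.
    db = location.Database("old.db")
    writer = location.Writer()

    for old in db.networks:
        if not old.has_flag(location.NETWORK_FLAG_DROP):
            continue

        new = writer.add_network("%s" % old)
        new.set_flag(location.NETWORK_FLAG_DROP)

        # Preserve the country code and ASN, as copy_all() does
        new.country_code = old.country_code
        if old.asn:
            new.asn = old.asn

    writer.write("drop-only.db")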