BINDINGS += perl
endif
+bashcompletiondir = @bashcompletiondir@
+
AM_CPPFLAGS = \
-include $(top_builddir)/config.h \
-DSYSCONFDIR=\""$(sysconfdir)"\" \
endif
LIBLOC_CURRENT=1
-LIBLOC_REVISION=2
+LIBLOC_REVISION=3
LIBLOC_AGE=0
DISTCHECK_CONFIGURE_FLAGS = \
CLEANFILES += \
src/libloc.pc
+if BUILD_BASH_COMPLETION
+bashcompletion_DATA = \
+ bash-completion/location
+endif
+
+EXTRA_DIST += \
+ bash-completion/location
+
dist_pkgpython_PYTHON = \
src/python/location/__init__.py \
src/python/location/database.py \
--- /dev/null
+# location(1) completion -*- shell-script -*-
+#
+# bash-completion - part of libloc
+#
+# Copyright (C) 2020,2023 Hans-Christoph Steiner <hans@eds.org>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Initialize completion state (COMPREPLY, cur, prev) and dispatch to the
+# per-command handler __complete_<name> when a name is passed as $1.
+# Falls back to manual initialization when the bash-completion package's
+# _init_completion helper is unavailable.
+__location_init() {
+	if type -t _init_completion >/dev/null; then
+		_init_completion -n : || return
+	else
+		# manual initialization for older bash completion versions
+		COMPREPLY=()
+		cur="${COMP_WORDS[COMP_CWORD]}"
+		prev="${COMP_WORDS[COMP_CWORD-1]}"
+	fi
+
+	# Run the command-specific completion, then strip colon-prefixed
+	# duplicates from COMPREPLY.
+	# NOTE(review): __ltrim_colon_completions comes from bash-completion;
+	# confirm it exists on the manual-initialization path above.
+	(( $# >= 1 )) && __complete_${1}
+	__ltrim_colon_completions "$cur"
+}
+
+# Complete the value for the option in ${prev} (directory, cron interval,
+# address family or export format), or offer --help plus the long options
+# listed in ${lopts} when the current word starts with a dash.
+# ${lopts} is declared local by the generated _location_* wrappers and set
+# by the per-command handlers.
+__complete_options() {
+	case "${prev}" in
+	--directory)
+		_filedir -d
+		return 0;;
+	--cron)
+		COMPREPLY=( $( compgen -W "daily weekly monthly" -- $cur ) )
+		return 0;;
+	--family)
+		COMPREPLY=( $( compgen -W "ipv6 ipv4" -- $cur ) )
+		return 0;;
+	--format)
+		COMPREPLY=( $( compgen -W "ipset list nftables xt_geoip" -- $cur ) )
+		return 0;;
+	esac
+
+	case "$cur" in
+	-*)
+		COMPREPLY=( $( compgen -W "--help ${lopts}" -- $cur ) )
+		return 0;;
+	esac
+}
+
+# Per-command completion handlers. Each sets the long options supported by
+# its subcommand in ${lopts} (left empty for commands that only take --help)
+# and delegates to __complete_options.
+
+__complete_dump() {
+	__complete_options
+}
+
+__complete_get_as() {
+	__complete_options
+}
+
+__complete_export() {
+	lopts="--directory --family --format"
+	__complete_options
+}
+
+__complete_list_networks_by_as() {
+	lopts="--family --format"
+	__complete_options
+}
+
+__complete_list_networks_by_cc() {
+	lopts="--family --format"
+	__complete_options
+}
+
+__complete_list_networks_by_flags() {
+	lopts="--anonymous-proxy --satellite-provider --anycast --drop --family --format"
+	__complete_options
+}
+
+__complete_list_bogons() {
+	lopts="--family --format"
+	__complete_options
+}
+
+__complete_list_countries() {
+	lopts="--show-name --show-continent"
+	__complete_options
+}
+
+__complete_lookup() {
+	__complete_options
+}
+
+__complete_search_as() {
+	__complete_options
+}
+
+__complete_update() {
+	lopts="--cron"
+	__complete_options
+}
+
+__complete_version() {
+	__complete_options
+}
+
+__complete_verify() {
+	__complete_options
+}
+
+# List of location(1) subcommands, one per word, wrapped in spaces so the
+# membership test in _location() can match " cmd ".
+# for f in `location|grep -Eo '[a-z,-]+,[a-z,-]+'| sed 's/,/ /g'`; do printf '%s \\\n' $f; done|sort -u
+__cmds=" \
+dump \
+export \
+get-as \
+list-bogons \
+list-countries \
+list-networks-by-as \
+list-networks-by-cc \
+list-networks-by-flags \
+lookup \
+search-as \
+update \
+verify \
+version \
+"
+
+# Generate one wrapper _location_<cmd> per subcommand. The wrapper declares
+# the shared state variables local and calls __location_init with the
+# handler suffix (hyphens mapped to underscores to form a valid identifier).
+for c in $__cmds; do
+	eval "_location_${c} () {
+		local cur prev lopts
+		__location_init ${c//-/_}
+	}"
+done
+
+# Top-level completion entry point for location(1): dispatch to the
+# per-subcommand wrapper when the first word is a known subcommand,
+# otherwise (while completing the first word) offer the subcommand names.
+_location() {
+	local cmd
+	cmd=${COMP_WORDS[1]}
+
+	[[ $__cmds == *\ $cmd\ * ]] && _location_${cmd} || {
+		(($COMP_CWORD == 1)) && COMPREPLY=( $( compgen -W "${__cmds}" -- $cmd ) )
+	}
+}
+
+complete -F _location location
+
+# Sourced file: return success explicitly.
+return 0
AC_PREREQ(2.60)
AC_INIT([libloc],
- [0.9.16],
+ [0.9.17],
[location@lists.ipfire.org],
[libloc],
[https://location.ipfire.org/])
if test "${have_man_pages}" = "yes" && test -z "${ASCIIDOC}"; then
AC_MSG_ERROR([Required program 'asciidoc' not found])
fi
+
+# - pkg-config -----------------------------------------------------------------
+
+m4_ifndef([PKG_PROG_PKG_CONFIG],
+ [m4_fatal([Could not locate the pkg-config autoconf
+ macros. These are usually located in /usr/share/aclocal/pkg.m4.
+ If your macros are in a different location, try setting the
+ environment variable AL_OPTS="-I/other/macro/dir" before running
+ ./autogen.sh or autoreconf again. Make sure pkg-config is installed.])])
+
+PKG_PROG_PKG_CONFIG
+PKG_INSTALLDIR(['${usrlib_execdir}/pkgconfig'])
+
+# - bash-completion ------------------------------------------------------------
+
+#enable_bash_completion=yes
+AC_ARG_WITH([bashcompletiondir],
+ AS_HELP_STRING([--with-bashcompletiondir=DIR], [Bash completions directory]),
+ [],
+ [AS_IF([`$PKG_CONFIG --exists bash-completion`], [
+ with_bashcompletiondir=`$PKG_CONFIG --variable=completionsdir bash-completion`
+ ], [
+ with_bashcompletiondir=${datadir}/bash-completion/completions
+ ])
+])
+
+AC_SUBST([bashcompletiondir], [$with_bashcompletiondir])
+
+AC_ARG_ENABLE([bash-completion],
+ AS_HELP_STRING([--disable-bash-completion], [do not install bash completion files]),
+ [], [enable_bash_completion=yes]
+)
+
+AM_CONDITIONAL([BUILD_BASH_COMPLETION], [test "x$enable_bash_completion" = xyes])
+
# - debug ----------------------------------------------------------------------
AC_ARG_ENABLE([debug],
database path: ${with_database_path}
debug: ${enable_debug}
systemd support: ${have_systemd}
+ bash-completion: ${enable_bash_completion}
Bindings:
Perl: ${enable_perl}
+libloc (0.9.17-1) unstable; urgency=medium
+
+ [ Michael Tremer ]
+ * importer: Store geofeed URLs from RIR data
+ * importer: Add command to import geofeeds into the database
+ * importer: Just fetch any exception from the executor
+ * importer: Sync geofeeds
+ * importer: Use geofeeds for country assignment
+ * importer: Use a GIST index for networks from geofeeds
+  * importer: Add a search index to match geofeed networks quicker
+ * importer: Fix reading Geofeeds from remarks
+ * importer: Ensure that we only use HTTPS URLs for Geofeeds
+ * importer: Validate country codes from Geofeeds
+ * importer: Fix parsing gzipped content on invalid Content-Type header
+ * po: Update translations
+ * network: Drop an unused function to count all networks
+ * location: Fix correct set name when family is selected
+ * export: Raise an error when trying to export ipset for both families
+ * Merge remote-tracking branch 'origin/geofeed'
+ * importer: Drop method to import routing information from route
+ servers
+ * importer: Silently ignore any empty country codes in Geofeeds
+ * importer: Convert country codes to uppercase from Geofeeds
+ * importer: Skip lines we cannot decode
+ * importer: Silence invalid country code warning
+ * importer: Catch TimeoutError when loading Geofeeds
+ * importer: Log any errors to the database
+ * geofeeds: Delete any data on 404
+ * geofeeds: Delete any data that did not update within two weeks
+ * geofeeds: Catch any invalid URLs
+ * database: Log query execution time in debug mode
+ * importer: Improve performance of AS name export query
+ * geofeed: Parse and normalize any URLs
+ * importer: AWS: Add country code of NZ for ap-southeast-5
+ * importer: Don't write AS without names into the database
+ * importer: Decrease the log level if Spamhaus' files are empty
+ * tree: Add flag to delete nodes
+ * writer: Cleanup networks before writing
+ * tree: Actually delete any deleted nodes
+ * Merge networks before writing the database
+ * networks: Delete networks from the tree on merge
+ * tree: More elegantly prevent deleting the root node
+  * network: Decrease log level when deleting networks
+ * data: Update database to 2023-07-31
+ * configure: Bump version to 0.9.17
+
+ [ Temuri Doghonadze ]
+ * po: Add Georgian translation
+
+ [ Hans-Christoph Steiner ]
+ * Add bash-completion file for the location command.
+
+ [ Stefan Schantl ]
+ * Install bash-completion files.
+
+ [ Petr Písař ]
+ * Fix string escaping in location tool
+
+ -- Michael Tremer <michael.tremer@ipfire.org> Mon, 31 Jul 2023 16:59:38 +0000
+
+libloc (0.9.16-1) unstable; urgency=medium
+
+ [ Peter Müller ]
+ * location-importer.in: Conduct sanity checks per DROP list
+ * location-importer.in: Add new Amazon region codes
+
+ [ Michael Tremer ]
+ * importer: Fix potential SQL command injection
+ * configure: Fix incorrect database path
+ * python: Export __version__ in location module
+ * writer: Add an empty string to the stringpool
+ * export: Fix generating file names for ipset output
+ * database: Ship a recent default database
+ * tests: Drop the test database and use the distributed one
+ * database: Correct error code on verification
+ * writer: Fix typo in signature variable
+ * writer: Assign correct file descriptor for private keys
+ * database: Fix check if a signature is set
+ * configure: Drop superfluous bracket
+ * configure: Bump version to 0.9.16
+
+ [ Petr Písař ]
+ * Move location manual from section 8 to section 1
+ * Remove shebangs from Python modules
+ * Move location manual from section 8 to section 1 in location-
+ update.service
+ * Install Perl files to Perl vendor directory
+
+ -- Michael Tremer <michael.tremer@ipfire.org> Sat, 29 Oct 2022 13:25:36 +0000
+
libloc (0.9.15-1) unstable; urgency=medium
[ Peter Müller ]
usr/bin
+usr/share/bash-completion/completions/location
+var/lib/location/database.db
var/lib/location/signing-key.pem
lib/systemd/system
-usr/share/man/man8
+usr/share/man/man1
+src/cron/location-update.in
src/libloc.pc.in
src/python/location/__init__.py
src/python/location/database.py
msgstr ""
"Project-Id-Version: libloc 0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-04-15 11:29+0000\n"
+"POT-Creation-Date: 2022-10-29 12:46+0000\n"
"PO-Revision-Date: 2018-02-01 14:05+0000\n"
"Last-Translator: Michael Tremer <michael.tremer@ipfire.org>\n"
"Language-Team: German\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+msgid "Won't write binary output to stdout"
+msgstr ""
+
msgid "Location Importer Command Line Interface"
msgstr ""
msgid "Hostile Networks safe to drop"
msgstr ""
+msgid "Lists all bogons"
+msgstr ""
+
msgid "Lists all countries"
msgstr ""
msgid "Anycast"
msgstr ""
+msgid "Hostile Network safe to drop"
+msgstr ""
+
#, python-format
msgid "Invalid ASN: %s"
msgstr ""
--- /dev/null
+# Georgian translation for libloc.
+# Copyright (C) 2023 libloc's authors.
+# This file is distributed under the same license as the libloc package.
+# Temuri Doghonadze <temuri.doghonadze@gmail.com>, 2023.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: libloc\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2022-10-29 12:46+0000\n"
+"PO-Revision-Date: 2023-02-22 08:57+0100\n"
+"Last-Translator: Temuri Doghonadze <temuri.doghonadze@gmail.com>\n"
+"Language-Team: Georgian <(nothing)>\n"
+"Language: ka\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"X-Generator: Poedit 3.2.2\n"
+
+msgid "Won't write binary output to stdout"
+msgstr "ბინარული მონაცემები stdout-ზე გამოტანილი არ იქნება"
+
+msgid "Location Importer Command Line Interface"
+msgstr "მდებარეობის შემოტანის ბრძანების სტრიქონის ინტერფეისი"
+
+msgid "Enable debug output"
+msgstr "გამართვის გამოტანის ჩართვა"
+
+msgid "Enable quiet mode"
+msgstr "ჩუმი რეჟიმის ჩართვა"
+
+msgid "Database Hostname"
+msgstr "ბაზის ჰოსტის სახელი"
+
+msgid "HOST"
+msgstr "ჰოსტი"
+
+msgid "Database Name"
+msgstr "ბაზის სახელი"
+
+msgid "NAME"
+msgstr "სახელი"
+
+msgid "Database Username"
+msgstr "ბაზის მომხმარებლის სახელი"
+
+msgid "USERNAME"
+msgstr "მომხმარებლის სახელი"
+
+msgid "Database Password"
+msgstr "მონაცემთა ბაზის პაროლი"
+
+msgid "PASSWORD"
+msgstr "პაროლი"
+
+#. Write Database
+msgid "Write database to file"
+msgstr "მონაცემთა ბაზის ფაილში ჩაწერა"
+
+msgid "Database File"
+msgstr "ბაზის ფაილი"
+
+msgid "Signing Key"
+msgstr "ხელმოწერის გასაღები"
+
+msgid "Backup Signing Key"
+msgstr "სარეზერვო ხელმოწერის გასაღები"
+
+msgid "Sets the vendor"
+msgstr "მომწოდებლის დაყენება"
+
+msgid "Sets a description"
+msgstr "აღწერის დაყენება"
+
+msgid "Sets the license"
+msgstr "ლიცენზიის დაყენება"
+
+msgid "Database Format Version"
+msgstr "ბაზის ფორმატის ვერსია"
+
+#. Update WHOIS
+msgid "Update WHOIS Information"
+msgstr "WHOIS-ის ინფორმაციის განახლება"
+
+msgid "Update BGP Annoucements"
+msgstr "BGP-ის ანონსების განახლება"
+
+msgid "Route Server to connect to"
+msgstr "რომელ რაუტის სერვერს დავუკავშირდე"
+
+msgid "SERVER"
+msgstr "სერვერი"
+
+msgid "Update overrides"
+msgstr "განახლება გადაფარავს"
+
+msgid "Files to import"
+msgstr "შემოსატანი ფაილები"
+
+msgid "Import countries"
+msgstr "ქვეყნების შემოტანა"
+
+msgid "File to import"
+msgstr "შემოსატანი ფაილი"
+
+msgid "Location Database Command Line Interface"
+msgstr "მდებარეობის ბაზის ბრძანების სტრიქონის ინტერფეისი"
+
+msgid "Path to database"
+msgstr "ბილიკი ბაზამდე"
+
+msgid "Public Signing Key"
+msgstr "საჯარო ხელმოწერის გასაღები"
+
+msgid "Show database version"
+msgstr "ბაზის ვერსიის ჩვენება"
+
+msgid "Lookup one or multiple IP addresses"
+msgstr "ერთი ან რამდენიმე IP მისამართის მოძებნა"
+
+msgid "Dump the entire database"
+msgstr "მთელი ბაზის დამპი"
+
+#. Update
+msgid "Update database"
+msgstr "ბაზის განახლება"
+
+msgid "Update the library only once per interval"
+msgstr "ბიბლიოთეკის მხოლოდ მითითებულ ინტერვალში განახლება"
+
+msgid "Verify the downloaded database"
+msgstr "გადმოწერილი ბაზის შემოწმება"
+
+msgid "Get information about one or multiple Autonomous Systems"
+msgstr "ერთ ან მეტ ავტონომიურ სისტემაზე ინფორმაციის მიღება"
+
+msgid "Search for Autonomous Systems that match the string"
+msgstr "ავტონომიური სისტემების ძებნა, რომლებიც სტრიქონს ემთხვევა"
+
+msgid "Lists all networks in an AS"
+msgstr "AS-ში ყველა ქსელის სია"
+
+msgid "Lists all networks in a country"
+msgstr "ქვეყნის ყველა ქსელის სია"
+
+msgid "Lists all networks with flags"
+msgstr "ქსელების ალმებით ჩვენება"
+
+msgid "Anonymous Proxies"
+msgstr "ანონიმური პროქსები"
+
+msgid "Satellite Providers"
+msgstr "სატელიტური პროვაიდერები"
+
+msgid "Anycasts"
+msgstr "Anycasts"
+
+msgid "Hostile Networks safe to drop"
+msgstr "უსაფრთხოდ დაბლოკვადი მტრული ქსელები"
+
+msgid "Lists all bogons"
+msgstr "ყველა ჭაობის სია"
+
+msgid "Lists all countries"
+msgstr "ყველა ქვეყნის სია"
+
+msgid "Show the name of the country"
+msgstr "ქვეყნის სახელის ჩვენება"
+
+msgid "Show the continent"
+msgstr "კონტინენტის ჩვენება"
+
+msgid "Exports data in many formats to load it into packet filters"
+msgstr "მონაცემების ბევრ ფორმატში გატანა მათი პაკეტის ფილტრებში ჩასატვირთად"
+
+msgid "Output format"
+msgstr "გამოტანის ფორმატი"
+
+msgid "Output directory"
+msgstr "გამოტანის საქაღალდე"
+
+msgid "Specify address family"
+msgstr "მიუთითეთ მისამართის ოჯახი"
+
+msgid "List country codes or ASNs to export"
+msgstr "ქვეყნის კოდების ან ASN-ების სია გასატანად"
+
+#, python-format
+msgid "Invalid IP address: %s"
+msgstr "არასწორი IP მისამართი: %s"
+
+#, python-format
+msgid "Nothing found for %(address)s"
+msgstr "%(address)s-სთვის ვერაფერი ვიპოვე"
+
+msgid "Network"
+msgstr "ქსელი"
+
+msgid "Country"
+msgstr "ქვეყანა"
+
+msgid "Autonomous System"
+msgstr "ავტონომიური სისტემა"
+
+msgid "Anonymous Proxy"
+msgstr "ანონიმური პროქსი"
+
+msgid "yes"
+msgstr "დიახ"
+
+msgid "Satellite Provider"
+msgstr "სატელიტური პროვაიდერი"
+
+msgid "Anycast"
+msgstr "Anycast"
+
+msgid "Hostile Network safe to drop"
+msgstr "უსაფრთხოდ დაბლოკვადი მტრული ქსელი"
+
+#, python-format
+msgid "Invalid ASN: %s"
+msgstr "არასწორი ASN: %s"
+
+#, python-format
+msgid "Could not find AS%s"
+msgstr "ვერ ვიპოვნე AS%s"
+
+#, python-format
+msgid "AS%(asn)s belongs to %(name)s"
+msgstr "AS%(asn)s ეკუთვნის %(name)s"
+
+msgid "The database has been updated recently"
+msgstr "ბაზა ახლახანს განახლდა"
+
+msgid "You must at least pass one flag"
+msgstr "აუცილებელია, ერთი ალამი მაინც გადასცეთ"
+
+#, python-format
+msgid "One Day"
+msgid_plural "%(days)s Days"
+msgstr[0] "1 დღე"
+msgstr[1] "%(days)s დღე"
+
+#, python-format
+msgid "One Hour"
+msgid_plural "%(hours)s Hours"
+msgstr[0] "1 საათი"
+msgstr[1] "%(hours)s საათი"
+
+#, python-format
+msgid "One Minute"
+msgid_plural "%(minutes)s Minutes"
+msgstr[0] "1 წუთი"
+msgstr[1] "%(minutes)s წუთი"
+
+#, python-format
+msgid "One Second"
+msgid_plural "%(seconds)s Seconds"
+msgstr[0] "1 წამი"
+msgstr[1] "%(seconds)s წამი"
+
+msgid "Now"
+msgstr "ახლა"
+
+#, python-format
+msgid "%s ago"
+msgstr "%s-ის წინ"
}
}
+// Compares two ISO 3166 country codes; returns 0 when they are equal.
+// Only the first two bytes are compared — any NUL terminator is ignored.
+static inline int loc_country_code_cmp(const char* cc1, const char* cc2) {
+	return memcmp(cc1, cc2, 2);
+}
+
#endif
#endif
int loc_network_list_push(struct loc_network_list* list, struct loc_network* network);
struct loc_network* loc_network_list_pop(struct loc_network_list* list);
struct loc_network* loc_network_list_pop_first(struct loc_network_list* list);
+int loc_network_list_remove(struct loc_network_list* list, struct loc_network* network);
int loc_network_list_contains(struct loc_network_list* list, struct loc_network* network);
int loc_network_list_merge(struct loc_network_list* self, struct loc_network_list* other);
+void loc_network_list_remove_with_prefix_smaller_than(
+ struct loc_network_list* list, const unsigned int prefix);
+
#ifdef LIBLOC_PRIVATE
#include <netinet/in.h>
int(*callback)(struct loc_network* network, void* data), void* data);
int loc_network_tree_dump(struct loc_network_tree* tree);
int loc_network_tree_add_network(struct loc_network_tree* tree, struct loc_network* network);
-size_t loc_network_tree_count_networks(struct loc_network_tree* tree);
size_t loc_network_tree_count_nodes(struct loc_network_tree* tree);
struct loc_network_tree_node;
int loc_network_tree_node_is_leaf(struct loc_network_tree_node* node);
struct loc_network* loc_network_tree_node_get_network(struct loc_network_tree_node* node);
+int loc_network_tree_cleanup(struct loc_network_tree* tree);
+
#endif
#endif
return network;
}
+/*
+	Removes the given network from the list, if present, releasing the
+	list's reference and compacting the remaining elements.
+
+	Returns zero both on successful removal and when the network was not
+	on the list at all.
+*/
+int loc_network_list_remove(struct loc_network_list* list, struct loc_network* network) {
+	int found = 0;
+
+	// Find the network on the list
+	off_t index = loc_network_list_find(list, network, &found);
+
+	// Nothing to do if the network wasn't found
+	if (!found)
+		return 0;
+
+	// Dereference the network at the position
+	loc_network_unref(list->elements[index]);
+
+	// Move all other elements back
+	for (unsigned int i = index; i < list->size - 1; i++)
+		list->elements[i] = list->elements[i+1];
+
+	// The list is shorter now
+	--list->size;
+
+	return 0;
+}
+
LOC_EXPORT int loc_network_list_contains(struct loc_network_list* list, struct loc_network* network) {
int found = 0;
return 0;
}
+
+/*
+	Drops all networks whose prefix value is numerically LARGER than the
+	given prefix — i.e. networks smaller than the given size — compacting
+	the list in place.
+
+	NOTE(review): the name reads "prefix smaller than" while the test is
+	"p > prefix"; the intent is "networks smaller than /prefix". Confirm
+	against the callers before renaming.
+*/
+void loc_network_list_remove_with_prefix_smaller_than(
+		struct loc_network_list* list, const unsigned int prefix) {
+	unsigned int p = 0;
+
+	// Count how many networks were removed
+	unsigned int removed = 0;
+
+	for (unsigned int i = 0; i < list->size; i++) {
+		// Fetch the prefix
+		p = loc_network_prefix(list->elements[i]);
+
+		if (p > prefix) {
+			// Drop this network
+			loc_network_unref(list->elements[i]);
+
+			// Increment counter
+			removed++;
+
+			continue;
+		}
+
+		// Move pointers backwards to keep the list filled
+		list->elements[i - removed] = list->elements[i];
+	}
+
+	// Adjust size
+	list->size -= removed;
+
+	return;
+}
return 0;
}
+/*
+	Compares the payload properties of two networks (country code, ASN,
+	flags) without looking at the address range. Returns zero when all
+	properties match; a merge/dedup of the two networks is only valid then.
+*/
+static int loc_network_properties_cmp(struct loc_network* self, struct loc_network* other) {
+	int r;
+
+	// Check country code
+	r = loc_country_code_cmp(self->country_code, other->country_code);
+	if (r)
+		return r;
+
+	// Check ASN
+	if (self->asn > other->asn)
+		return 1;
+	else if (self->asn < other->asn)
+		return -1;
+
+	// Check flags
+	if (self->flags > other->flags)
+		return 1;
+	else if (self->flags < other->flags)
+		return -1;
+
+	return 0;
+}
+
LOC_EXPORT int loc_network_overlaps(struct loc_network* self, struct loc_network* other) {
// Either of the start addresses must be in the other subnet
if (loc_network_matches_address(self, &other->first_address))
return subnets;
}
+/*
+	Tries to merge two networks into a single supernet of half the prefix
+	length. This only succeeds when both networks have the same family,
+	prefix and properties, are exact neighbours (n2 starts right after n1
+	ends), and n1 is aligned so the combined range is a valid network.
+
+	On success *n receives a new reference to the merged network; when the
+	networks cannot be merged *n stays NULL and zero is returned. A
+	non-zero return indicates an allocation error.
+*/
+static int loc_network_merge(struct loc_network** n,
+		struct loc_network* n1, struct loc_network* n2) {
+	struct loc_network* network = NULL;
+	struct in6_addr address;
+	int r;
+
+	// Reset pointer
+	*n = NULL;
+
+	// Family must match
+	if (n1->family != n2->family)
+		return 0;
+
+	// The prefix must match, too
+	if (n1->prefix != n2->prefix)
+		return 0;
+
+	// Cannot merge ::/0 or 0.0.0.0/0
+	if (!n1->prefix || !n2->prefix)
+		return 0;
+
+	const unsigned int prefix = loc_network_prefix(n1);
+
+	// How many bits do we need to represent this address?
+	const size_t bitlength = loc_address_bit_length(&n1->first_address) - 1;
+
+	// We cannot shorten this any more
+	// NOTE(review): this also rejects merges where n1 is the upper half
+	// of the would-be supernet (misaligned) — confirm intent.
+	if (bitlength == prefix)
+		return 0;
+
+	// Increment the last address of the first network
+	address = n1->last_address;
+	loc_address_increment(&address);
+
+	// If they don't match they are not neighbours
+	if (loc_address_cmp(&address, &n2->first_address) != 0)
+		return 0;
+
+	// All properties must match, too
+	if (loc_network_properties_cmp(n1, n2) != 0)
+		return 0;
+
+	// Create a new network object
+	r = loc_network_new(n1->ctx, &network, &n1->first_address, prefix - 1);
+	if (r)
+		return r;
+
+	// Copy everything else
+	loc_country_code_copy(network->country_code, n1->country_code);
+	network->asn = n1->asn;
+	network->flags = n1->flags;
+
+	// Return pointer
+	*n = network;
+
+	return 0;
+}
+
int loc_network_to_database_v1(struct loc_network* network, struct loc_database_network_v1* dbobj) {
// Add country code
loc_country_code_copy(dbobj->country_code, network->country_code);
struct loc_network_tree_node* one;
struct loc_network* network;
+
+ // Set if deleted
+ int deleted:1;
};
int loc_network_tree_new(struct loc_ctx* ctx, struct loc_network_tree** tree) {
}
static struct loc_network_tree_node* loc_network_tree_get_node(struct loc_network_tree_node* node, int path) {
- struct loc_network_tree_node** n;
+ struct loc_network_tree_node** n = NULL;
+ int r;
- if (path == 0)
- n = &node->zero;
- else
- n = &node->one;
+ switch (path) {
+ case 0:
+ n = &node->zero;
+ break;
+
+ case 1:
+ n = &node->one;
+ break;
+
+ default:
+ errno = EINVAL;
+ return NULL;
+ }
+
+ // If the node existed, but has been deleted, we undelete it
+ if (*n && (*n)->deleted) {
+ (*n)->deleted = 0;
// If the desired node doesn't exist, yet, we will create it
- if (*n == NULL) {
- int r = loc_network_tree_node_new(node->ctx, n);
+ } else if (!*n) {
+ r = loc_network_tree_node_new(node->ctx, n);
if (r)
return NULL;
}
int(*callback)(struct loc_network* network, void* data), void* data) {
int r;
+ // If the node has been deleted, don't process it
+ if (node->deleted)
+ return 0;
+
// Finding a network ends the walk here
if (node->network) {
if (filter_callback) {
// Check if node has not been set before
if (node->network) {
- DEBUG(tree->ctx, "There is already a network at this path\n");
+ DEBUG(tree->ctx, "There is already a network at this path: %s\n",
+ loc_network_str(node->network));
return -EBUSY;
}
return 0;
}
-static int __loc_network_tree_count(struct loc_network* network, void* data) {
- size_t* counter = (size_t*)data;
+static int loc_network_tree_delete_network(
+ struct loc_network_tree* tree, struct loc_network* network) {
+ struct loc_network_tree_node* node = NULL;
- // Increase the counter for each network
- counter++;
+ DEBUG(tree->ctx, "Deleting network %s from tree...\n", loc_network_str(network));
- return 0;
-}
+ node = loc_network_tree_get_path(tree, &network->first_address, network->prefix);
+ if (!node) {
+ ERROR(tree->ctx, "Network was not found in tree %s\n", loc_network_str(network));
+ return 1;
+ }
-size_t loc_network_tree_count_networks(struct loc_network_tree* tree) {
- size_t counter = 0;
+ // Drop the network
+ if (node->network) {
+ loc_network_unref(node->network);
+ node->network = NULL;
+ }
- int r = loc_network_tree_walk(tree, NULL, __loc_network_tree_count, &counter);
- if (r)
- return r;
+ // Mark the node as deleted if it was a leaf
+ if (!node->zero && !node->one)
+ node->deleted = 1;
- return counter;
+ return 0;
}
static size_t __loc_network_tree_count_nodes(struct loc_network_tree_node* node) {
size_t counter = 1;
+ // Don't count deleted nodes
+ if (node->deleted)
+ return 0;
+
if (node->zero)
counter += __loc_network_tree_count_nodes(node->zero);
struct loc_network* loc_network_tree_node_get_network(struct loc_network_tree_node* node) {
return loc_network_ref(node->network);
}
+
+/*
+	Merge the tree!
+*/
+
+// Shared state for a merge pass over the tree (see
+// loc_network_tree_merge_step below).
+struct loc_network_tree_merge_ctx {
+	struct loc_network_tree* tree;      // tree being merged in place
+	struct loc_network_list* networks;  // stack of merge candidates
+	unsigned int merged;                // number of successful merges
+};
+
+/*
+	Walk callback for one merge pass: tries to merge the visited network
+	with the candidates stacked on ctx->networks.
+
+	On a successful merge the new supernet replaces both halves in the
+	tree and on the stack, and is recursively re-tried so chains of
+	neighbours collapse in one pass. Candidates that can no longer merge
+	with anything (prefix larger than the current network's) are pruned
+	from the stack at the end.
+*/
+static int loc_network_tree_merge_step(struct loc_network* network, void* data) {
+	struct loc_network_tree_merge_ctx* ctx = (struct loc_network_tree_merge_ctx*)data;
+	struct loc_network* n = NULL;
+	struct loc_network* m = NULL;
+	int r;
+
+	// How many networks do we have?
+	size_t i = loc_network_list_size(ctx->networks);
+
+	// If the list is empty, just add the network
+	if (i == 0)
+		return loc_network_list_push(ctx->networks, network);
+
+	while (i--) {
+		// Fetch the last network of the list
+		n = loc_network_list_get(ctx->networks, i);
+
+		// Try to merge the two networks
+		r = loc_network_merge(&m, n, network);
+		if (r)
+			goto ERROR;
+
+		// Did we get a result?
+		if (m) {
+			DEBUG(ctx->tree->ctx, "Merged networks %s + %s -> %s\n",
+				loc_network_str(n), loc_network_str(network), loc_network_str(m));
+
+			// Add the new network
+			r = loc_network_tree_add_network(ctx->tree, m);
+			switch (r) {
+				case 0:
+					break;
+
+				// There might already be a network
+				case -EBUSY:
+					r = 0;
+					goto ERROR;
+
+				default:
+					goto ERROR;
+			}
+
+			// Remove the merge networks
+			r = loc_network_tree_delete_network(ctx->tree, network);
+			if (r)
+				goto ERROR;
+
+			r = loc_network_tree_delete_network(ctx->tree, n);
+			if (r)
+				goto ERROR;
+
+			// Add the new network to the stack
+			r = loc_network_list_push(ctx->networks, m);
+			if (r)
+				goto ERROR;
+
+			// Remove the previous network from the stack
+			r = loc_network_list_remove(ctx->networks, n);
+			if (r)
+				goto ERROR;
+
+			// Count merges
+			ctx->merged++;
+
+			// Try merging the new network with others
+			r = loc_network_tree_merge_step(m, data);
+			if (r)
+				goto ERROR;
+
+			loc_network_unref(m);
+			m = NULL;
+
+			// Once we have found a merge, we are done
+			break;
+
+		// If we could not merge the two networks, we add the current one
+		// NOTE(review): this runs once per non-matching candidate —
+		// relies on loc_network_list_push ignoring duplicates; confirm.
+		} else {
+			r = loc_network_list_push(ctx->networks, network);
+			if (r)
+				goto ERROR;
+		}
+
+		loc_network_unref(n);
+		n = NULL;
+	}
+
+	const unsigned int prefix = loc_network_prefix(network);
+
+	// Remove any networks that we cannot merge
+	loc_network_list_remove_with_prefix_smaller_than(ctx->networks, prefix);
+
+// Fall-through on success: r holds the last operation's result (zero).
+ERROR:
+	if (m)
+		loc_network_unref(m);
+	if (n)
+		loc_network_unref(n);
+
+	return r;
+}
+
+/*
+	Runs a full merge pass over the tree: walks every network and merges
+	adjacent networks with identical properties into supernets.
+
+	Returns zero on success or a negative error code from the walk.
+*/
+static int loc_network_tree_merge(struct loc_network_tree* tree) {
+	struct loc_network_tree_merge_ctx ctx = {
+		.tree = tree,
+		.networks = NULL,
+		.merged = 0,
+	};
+	int r;
+
+	// Create a new list
+	r = loc_network_list_new(tree->ctx, &ctx.networks);
+	if (r)
+		goto ERROR;
+
+	// Walk through the entire tree
+	r = loc_network_tree_walk(tree, NULL, loc_network_tree_merge_step, &ctx);
+	if (r)
+		goto ERROR;
+
+	DEBUG(tree->ctx, "%u network(s) have been merged\n", ctx.merged);
+
+// Fall-through on success (r is zero here)
+ERROR:
+	if (ctx.networks)
+		loc_network_list_unref(ctx.networks);
+
+	return r;
+}
+
+/*
+	Deduplicate the tree
+*/
+
+// Shared state for a deduplication pass (see loc_network_tree_dedup_step).
+struct loc_network_tree_dedup_ctx {
+	struct loc_network_tree* tree;  // tree being deduplicated in place
+	struct loc_network* network;    // most recent non-subnet network seen
+	unsigned int removed;           // number of removed duplicate subnets
+};
+
+/*
+	Walk callback for deduplication: removes any network that is a subnet
+	of the previously kept network and carries identical properties,
+	since the parent network already covers it.
+*/
+static int loc_network_tree_dedup_step(struct loc_network* network, void* data) {
+	struct loc_network_tree_dedup_ctx* ctx = (struct loc_network_tree_dedup_ctx*)data;
+
+	// First call when we have not seen any networks, yet
+	if (!ctx->network) {
+		ctx->network = loc_network_ref(network);
+		return 0;
+	}
+
+	// If network is a subnet of ctx->network, and all properties match,
+	// we can drop the network.
+	if (loc_network_is_subnet(ctx->network, network)) {
+		if (loc_network_properties_cmp(ctx->network, network) == 0) {
+			// Increment counter
+			ctx->removed++;
+
+			// Remove the network
+			return loc_network_tree_delete_network(ctx->tree, network);
+		}
+
+		// Keep subnets with differing properties; the parent stays the
+		// reference network so later siblings are still checked against it.
+		return 0;
+	}
+
+	// Drop the reference to the previous network
+	if (ctx->network)
+		loc_network_unref(ctx->network);
+	ctx->network = loc_network_ref(network);
+
+	return 0;
+}
+
+/*
+	Runs a full deduplication pass over the tree, removing subnets that
+	repeat their parent network's properties.
+
+	Returns zero on success or a negative error code from the walk.
+*/
+static int loc_network_tree_dedup(struct loc_network_tree* tree) {
+	struct loc_network_tree_dedup_ctx ctx = {
+		.tree = tree,
+		.network = NULL,
+		.removed = 0,
+	};
+	int r;
+
+	// Walk through the entire tree
+	r = loc_network_tree_walk(tree, NULL, loc_network_tree_dedup_step, &ctx);
+	if (r)
+		goto ERROR;
+
+	DEBUG(tree->ctx, "%u network(s) have been removed\n", ctx.removed);
+
+// Fall-through on success (r is zero here)
+ERROR:
+	if (ctx.network)
+		loc_network_unref(ctx.network);
+
+	return r;
+}
+
+/*
+	Recursively frees subtrees that no longer carry any networks.
+
+	Returns 1 when *node was freed (and set to NULL), 0 when the node must
+	be kept, or a negative error code. A child's return value of 1 (also
+	the initial value of r0/r1 for absent children) marks that branch as
+	prunable.
+*/
+static int loc_network_tree_delete_node(struct loc_network_tree* tree,
+		struct loc_network_tree_node** node) {
+	struct loc_network_tree_node* n = *node;
+	int r0 = 1;
+	int r1 = 1;
+
+	// Return for nodes that have already been deleted
+	if (n->deleted)
+		goto DELETE;
+
+	// Delete zero
+	if (n->zero) {
+		r0 = loc_network_tree_delete_node(tree, &n->zero);
+		if (r0 < 0)
+			return r0;
+	}
+
+	// Delete one
+	if (n->one) {
+		r1 = loc_network_tree_delete_node(tree, &n->one);
+		if (r1 < 0)
+			return r1;
+	}
+
+	// Don't delete this node if we are a leaf
+	if (n->network)
+		return 0;
+
+	// Don't delete this node if has child nodes that we need
+	if (!r0 || !r1)
+		return 0;
+
+	// Don't delete root
+	if (tree->root == n)
+		return 0;
+
+DELETE:
+	// It is now safe to delete the node
+	loc_network_tree_node_unref(n);
+	*node = NULL;
+
+	return 1;
+}
+
+/*
+	Prunes all deleted/empty nodes from the tree, starting at the root
+	(the root itself is never deleted). Returns zero on success.
+*/
+static int loc_network_tree_delete_nodes(struct loc_network_tree* tree) {
+	int r;
+
+	r = loc_network_tree_delete_node(tree, &tree->root);
+	if (r < 0)
+		return r;
+
+	return 0;
+}
+
+/*
+	Cleans up the tree before it is written out: removes duplicate
+	subnets, merges adjacent networks into supernets, and finally prunes
+	all nodes left empty by those two passes.
+
+	Returns zero on success or the first pass's error code.
+*/
+int loc_network_tree_cleanup(struct loc_network_tree* tree) {
+	int r;
+
+	// Deduplicate the tree
+	r = loc_network_tree_dedup(tree);
+	if (r)
+		return r;
+
+	// Merge networks
+	r = loc_network_tree_merge(tree);
+	if (r) {
+		ERROR(tree->ctx, "Could not merge networks: %m\n");
+		return r;
+	}
+
+	// Delete any unneeded nodes
+	r = loc_network_tree_delete_nodes(tree);
+	if (r)
+		return r;
+
+	return 0;
+}
import logging
import psycopg2
+import time
log = logging.getLogger("location.database")
log.propagate = 1
return self._db.cursor()
def _execute(self, cursor, query, parameters, kwparameters):
- log.debug("SQL Query: %s" % (query % (kwparameters or parameters)))
+ log.debug(
+ "Executing query: %s" % \
+ cursor.mogrify(query, kwparameters or parameters).decode(),
+ )
+
+ # Store the time when the query started
+ t = time.monotonic()
try:
return cursor.execute(query, kwparameters or parameters)
- except (OperationalError, psycopg2.ProgrammingError):
+
+ # Catch any errors
+ except OperationalError:
log.error("Error connecting to database on %s", self.host)
self.close()
raise
+ # Log how long the query took
+ finally:
+ # Determine duration the query took
+ d = time.monotonic() - t
+
+ log.debug("Query took %.2fms" % (d * 1000.0))
+
def transaction(self):
return Transaction(self)
# Count all networks
self.networks = 0
+ # Check that family is being set
+ if not self.family:
+ raise ValueError("%s requires family being set" % self.__class__.__name__)
+
@property
def hashsize(self):
"""
# Rewind the temporary file
t.seek(0)
+ gzip_compressed = False
+
# Fetch the content type
content_type = res.headers.get("Content-Type")
# Decompress any gzipped response on the fly
if content_type in ("application/x-gzip", "application/gzip"):
+ gzip_compressed = True
+
+ # Check for the gzip magic in case web servers send a different MIME type
+ elif t.read(2) == b"\x1f\x8b":
+ gzip_compressed = True
+
+ # Reset again
+ t.seek(0)
+
+ # Decompress the temporary file
+ if gzip_compressed:
+ log.debug("Gzip compression detected")
+
t = gzip.GzipFile(fileobj=t, mode="rb")
# Return the temporary file handle
###############################################################################
import argparse
+import concurrent.futures
+import http.client
import ipaddress
import json
import logging
import re
import socket
import sys
-import telnetlib
import urllib.error
# Load our location module
update_announcements.add_argument("server", nargs=1,
help=_("Route Server to connect to"), metavar=_("SERVER"))
+ # Update geofeeds
+ update_geofeeds = subparsers.add_parser("update-geofeeds",
+ help=_("Update Geofeeds"))
+ update_geofeeds.set_defaults(func=self.handle_update_geofeeds)
+
# Update overrides
update_overrides = subparsers.add_parser("update-overrides",
help=_("Update overrides"),
CREATE INDEX IF NOT EXISTS networks_family ON networks USING BTREE(family(network));
CREATE INDEX IF NOT EXISTS networks_search ON networks USING GIST(network inet_ops);
+ -- geofeeds
+ CREATE TABLE IF NOT EXISTS geofeeds(
+ id serial primary key,
+ url text,
+ status integer default null,
+ updated_at timestamp without time zone default null
+ );
+ ALTER TABLE geofeeds ADD COLUMN IF NOT EXISTS error text;
+ CREATE UNIQUE INDEX IF NOT EXISTS geofeeds_unique
+ ON geofeeds(url);
+ CREATE TABLE IF NOT EXISTS geofeed_networks(
+ geofeed_id integer references geofeeds(id) on delete cascade,
+ network inet,
+ country text,
+ region text,
+ city text
+ );
+ CREATE INDEX IF NOT EXISTS geofeed_networks_geofeed_id
+ ON geofeed_networks(geofeed_id);
+ CREATE INDEX IF NOT EXISTS geofeed_networks_search
+ ON geofeed_networks USING GIST(network inet_ops);
+ CREATE TABLE IF NOT EXISTS network_geofeeds(network inet, url text);
+ CREATE UNIQUE INDEX IF NOT EXISTS network_geofeeds_unique
+ ON network_geofeeds(network);
+ CREATE INDEX IF NOT EXISTS network_geofeeds_search
+ ON network_geofeeds USING GIST(network inet_ops);
+ CREATE INDEX IF NOT EXISTS network_geofeeds_url
+ ON network_geofeeds(url);
+
-- overrides
CREATE TABLE IF NOT EXISTS autnum_overrides(
number bigint NOT NULL,
SELECT
autnums.number AS number,
COALESCE(
- (SELECT overrides.name FROM autnum_overrides overrides
- WHERE overrides.number = autnums.number),
+ overrides.name,
autnums.name
) AS name
- FROM autnums
- WHERE name <> %s ORDER BY number
- """, "")
+ FROM
+ autnums
+ LEFT JOIN
+ autnum_overrides overrides ON autnums.number = overrides.number
+ ORDER BY
+ autnums.number
+ """)
for row in rows:
+ # Skip AS without names
+ if not row.name:
+ continue
+
a = writer.add_as(row.number)
a.name = row.name
SELECT network FROM networks
UNION
SELECT network FROM network_overrides
+ UNION
+ SELECT network FROM geofeed_networks
),
ordered_networks AS (
SELECT country FROM autnum_overrides overrides
WHERE networks.autnum = overrides.number
),
+ (
+ SELECT
+ geofeed_networks.country AS country
+ FROM
+ network_geofeeds
+
+ -- Join the data from the geofeeds
+ LEFT JOIN
+ geofeeds ON network_geofeeds.url = geofeeds.url
+ LEFT JOIN
+ geofeed_networks ON geofeeds.id = geofeed_networks.geofeed_id
+
+ -- Check whether we have a geofeed for this network
+ WHERE
+ networks.network <<= network_geofeeds.network
+ AND
+ networks.network <<= geofeed_networks.network
+
+ -- Filter for the best result
+ ORDER BY
+ masklen(geofeed_networks.network) DESC
+ LIMIT 1
+ ),
networks.country
) AS country,
inetnum[key].append(val)
+ # Parse the geofeed attribute
+ elif key == "geofeed":
+ inetnum["geofeed"] = val
+
+ # Parse geofeed when used as a remark
+ elif key == "remarks":
+ m = re.match(r"^(?:Geofeed)\s+(https://.*)", val)
+ if m:
+ inetnum["geofeed"] = m.group(1)
+
# Skip empty objects
if not inetnum or not "country" in inetnum:
return
# them into the database, if _check_parsed_network() succeeded
for single_network in inetnum.get("inet6num") or inetnum.get("inetnum"):
if self._check_parsed_network(single_network):
-
# Skip objects with unknown country codes if they are valid to avoid log spam...
if validcountries and invalidcountries:
log.warning("Skipping network with bogus countr(y|ies) %s (original countries: %s): %s" % \
"%s" % single_network, inetnum.get("country")[0], inetnum.get("country"), source_key,
)
+ # Update any geofeed information
+ geofeed = inetnum.get("geofeed", None)
+ if geofeed:
+ self._parse_geofeed(geofeed, single_network)
+
+ # Delete any previous geofeeds
+ else:
+ self.db.execute("DELETE FROM network_geofeeds WHERE network = %s",
+ "%s" % single_network)
+
+ def _parse_geofeed(self, url, single_network):
+ # Validate and normalize a Geofeed URL, then link it to the given
+ # network in network_geofeeds (upserting on the network key).
+
+ # Parse the URL
+ url = urllib.parse.urlparse(url)
+
+ # Make sure that this is a HTTPS URL
+ if not url.scheme == "https":
+ # BUGFIX: the original referenced the undefined name "geofeed"
+ # here, raising NameError for any non-HTTPS URL instead of
+ # skipping it quietly. Log the URL that was actually passed in.
+ log.debug("Geofeed URL is not using HTTPS: %s" % url.geturl())
+ return
+
+ # Put the URL back together normalized
+ url = url.geturl()
+
+ # Store/update any geofeeds
+ self.db.execute("""
+ INSERT INTO
+ network_geofeeds(
+ network,
+ url
+ )
+ VALUES(
+ %s, %s
+ )
+ ON CONFLICT (network) DO
+ UPDATE SET url = excluded.url""",
+ "%s" % single_network, url,
+ )
+
def _parse_org_block(self, block, source_key):
org = {}
for line in block:
with self.db.transaction():
if server.startswith("/"):
self._handle_update_announcements_from_bird(server)
- else:
- self._handle_update_announcements_from_telnet(server)
# Purge anything we never want here
self.db.execute("""
# We don't need to process any more
break
- def _handle_update_announcements_from_telnet(self, server):
- # Pre-compile regular expression for routes
- route = re.compile(b"^\*[\s\>]i([^\s]+).+?(\d+)\si\r\n", re.MULTILINE|re.DOTALL)
-
- with telnetlib.Telnet(server) as t:
- # Enable debug mode
- #if ns.debug:
- # t.set_debuglevel(10)
-
- # Wait for console greeting
- greeting = t.read_until(b"> ", timeout=30)
- if not greeting:
- log.error("Could not get a console prompt")
- return 1
-
- # Disable pagination
- t.write(b"terminal length 0\n")
-
- # Wait for the prompt to return
- t.read_until(b"> ")
-
- # Fetch the routing tables
- for protocol in ("ipv6", "ipv4"):
- log.info("Requesting %s routing table" % protocol)
-
- # Request the full unicast routing table
- t.write(b"show bgp %s unicast\n" % protocol.encode())
-
- # Read entire header which ends with "Path"
- t.read_until(b"Path\r\n")
-
- while True:
- # Try reading a full entry
- # Those might be broken across multiple lines but ends with i
- line = t.read_until(b"i\r\n", timeout=5)
- if not line:
- break
-
- # Show line for debugging
- #log.debug(repr(line))
-
- # Try finding a route in here
- m = route.match(line)
- if m:
- network, autnum = m.groups()
-
- # Convert network to string
- network = network.decode()
-
- # Append /24 for IPv4 addresses
- if not "/" in network and not ":" in network:
- network = "%s/24" % network
-
- # Convert AS number to integer
- autnum = int(autnum)
-
- log.info("Found announcement for %s by %s" % (network, autnum))
-
- self.db.execute("INSERT INTO announcements(network, autnum) \
- VALUES(%s, %s) ON CONFLICT (network) DO \
- UPDATE SET autnum = excluded.autnum, last_seen_at = CURRENT_TIMESTAMP",
- network, autnum,
- )
-
- log.info("Finished reading the %s routing table" % protocol)
-
def _bird_cmd(self, socket_path, command):
# Connect to the socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Otherwise return the line
yield line
+ def handle_update_geofeeds(self, ns):
+ # Refresh the Geofeed tables end-to-end: prune feeds that are no
+ # longer referenced, register newly referenced URLs, fetch all feeds
+ # that are stale (never fetched or older than one week) in parallel,
+ # then drop data from feeds that failed to update for two weeks.
+ # NOTE(review): "ns" (the parsed CLI namespace) is currently unused.
+
+ # Sync geofeeds
+ with self.db.transaction():
+ # Delete all geofeeds which are no longer linked
+ self.db.execute("""
+ DELETE FROM
+ geofeeds
+ WHERE
+ NOT EXISTS (
+ SELECT
+ 1
+ FROM
+ network_geofeeds
+ WHERE
+ geofeeds.url = network_geofeeds.url
+ )""",
+ )
+
+ # Copy all geofeeds
+ self.db.execute("""
+ INSERT INTO
+ geofeeds(
+ url
+ )
+ SELECT
+ url
+ FROM
+ network_geofeeds
+ ON CONFLICT (url)
+ DO NOTHING
+ """,
+ )
+
+ # Fetch all Geofeeds that require an update
+ geofeeds = self.db.query("""
+ SELECT
+ id,
+ url
+ FROM
+ geofeeds
+ WHERE
+ updated_at IS NULL
+ OR
+ updated_at <= CURRENT_TIMESTAMP - INTERVAL '1 week'
+ ORDER BY
+ id
+ """)
+
+ # Fetch feeds concurrently; each worker runs its own transaction
+ # inside _fetch_geofeed, so no shared state is mutated here.
+ with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
+ results = executor.map(self._fetch_geofeed, geofeeds)
+
+ # Fetch all results to raise any exceptions
+ for result in results:
+ pass
+
+ # Delete data from any feeds that did not update in the last two weeks
+ with self.db.transaction():
+ self.db.execute("""
+ DELETE FROM
+ geofeed_networks
+ WHERE
+ geofeed_networks.geofeed_id IN (
+ SELECT
+ geofeeds.id
+ FROM
+ geofeeds
+ WHERE
+ updated_at IS NULL
+ OR
+ updated_at <= CURRENT_TIMESTAMP - INTERVAL '2 weeks'
+ )
+ """)
+
+ def _fetch_geofeed(self, geofeed):
+ # Download and parse a single Geofeed (RFC 8805 style CSV) and
+ # replace its rows in geofeed_networks. "geofeed" is a DB row with
+ # at least "id" and "url". On HTTP/connection errors the status and
+ # error text are recorded on the geofeeds row instead of raising.
+ # Runs in a worker thread; everything happens in one transaction so
+ # a failed fetch never leaves a feed half-replaced.
+ log.debug("Fetching Geofeed %s" % geofeed.url)
+
+ with self.db.transaction():
+ # Open the URL
+ try:
+ req = urllib.request.Request(geofeed.url, headers={
+ "User-Agent" : "location/%s" % location.__version__,
+
+ # We expect some plain text file in CSV format
+ "Accept" : "text/csv, text/plain",
+ })
+
+ # XXX set proxy
+
+ # Send the request
+ with urllib.request.urlopen(req, timeout=10) as f:
+ # Remove any previous data
+ self.db.execute("DELETE FROM geofeed_networks \
+ WHERE geofeed_id = %s", geofeed.id)
+
+ # Line counter, used only for log messages
+ lineno = 0
+
+ # Read the output line by line
+ for line in f:
+ lineno += 1
+
+ try:
+ line = line.decode()
+
+ # Ignore any lines we cannot decode
+ except UnicodeDecodeError:
+ log.debug("Could not decode line %s in %s" \
+ % (lineno, geofeed.url))
+ continue
+
+ # Strip any newline
+ line = line.rstrip()
+
+ # Skip empty lines
+ if not line:
+ continue
+
+ # Try to parse the line
+ # NOTE(review): str.split never raises ValueError, so this
+ # except clause is dead code — kept byte-identical here.
+ try:
+ fields = line.split(",", 5)
+ except ValueError:
+ log.debug("Could not parse line: %s" % line)
+ continue
+
+ # Check if we have enough fields
+ if len(fields) < 4:
+ log.debug("Not enough fields in line: %s" % line)
+ continue
+
+ # Fetch all fields; any columns beyond the first four
+ # (e.g. the optional zip code) are deliberately ignored
+ network, country, region, city, = fields[:4]
+
+ # Try to parse the network
+ try:
+ network = ipaddress.ip_network(network, strict=False)
+ except ValueError:
+ log.debug("Could not parse network: %s" % network)
+ continue
+
+ # Strip any excess whitespace from country codes
+ country = country.strip()
+
+ # Make the country code uppercase
+ country = country.upper()
+
+ # Check the country code
+ if not country:
+ log.debug("Empty country code in Geofeed %s line %s" \
+ % (geofeed.url, lineno))
+ continue
+
+ elif not location.country_code_is_valid(country):
+ log.debug("Invalid country code in Geofeed %s:%s: %s" \
+ % (geofeed.url, lineno, country))
+ continue
+
+ # Write this into the database
+ self.db.execute("""
+ INSERT INTO
+ geofeed_networks (
+ geofeed_id,
+ network,
+ country,
+ region,
+ city
+ )
+ VALUES (%s, %s, %s, %s, %s)""",
+ geofeed.id,
+ "%s" % network,
+ country,
+ region,
+ city,
+ )
+
+ # Catch any HTTP errors
+ # NOTE(review): urllib.request.HTTPError is a re-export of
+ # urllib.error.HTTPError; the canonical name would be clearer.
+ except urllib.request.HTTPError as e:
+ self.db.execute("UPDATE geofeeds SET status = %s, error = %s \
+ WHERE id = %s", e.code, "%s" % e, geofeed.id)
+
+ # Remove any previous data when the feed has been deleted
+ if e.code == 404:
+ self.db.execute("DELETE FROM geofeed_networks \
+ WHERE geofeed_id = %s", geofeed.id)
+
+ # Catch any other errors and connection timeouts
+ except (http.client.InvalidURL, urllib.request.URLError, TimeoutError) as e:
+ log.debug("Could not fetch URL %s: %s" % (geofeed.url, e))
+
+ # 599 is a non-standard code used here to mean "network error"
+ self.db.execute("UPDATE geofeeds SET status = %s, error = %s \
+ WHERE id = %s", 599, "%s" % e, geofeed.id)
+
+ # Mark the geofeed as updated
+ else:
+ self.db.execute("""
+ UPDATE
+ geofeeds
+ SET
+ updated_at = CURRENT_TIMESTAMP,
+ status = NULL,
+ error = NULL
+ WHERE
+ id = %s""",
+ geofeed.id,
+ )
+
def handle_update_overrides(self, ns):
with self.db.transaction():
# Only drop manually created overrides, as we can be reasonably sure to have them,
"ap-southeast-2": "AU",
"ap-southeast-3": "MY",
"ap-southeast-4": "AU",
+ "ap-southeast-5": "NZ", # Auckland, NZ
"ap-southeast-6": "AP", # XXX: Precise location not documented anywhere
"ap-northeast-1": "JP",
"ca-central-1": "CA",
if len(fcontent) > 10:
self.db.execute("DELETE FROM network_overrides WHERE source = %s", name)
else:
- log.error("%s (%s) returned likely bogus file, ignored" % (name, url))
+ log.warning("%s (%s) returned likely bogus file, ignored" % (name, url))
continue
# Iterate through every line, filter comments and add remaining networks to
if len(fcontent) > 10:
self.db.execute("DELETE FROM autnum_overrides WHERE source = %s", name)
else:
- log.error("%s (%s) returned likely bogus file, ignored" % (name, url))
+ log.warning("%s (%s) returned likely bogus file, ignored" % (name, url))
continue
# Iterate through every line, filter comments and add remaining ASNs to
writer = self.__get_output_formatter(ns)
for asn in ns.asn:
- f = writer("AS%s" % asn, f=sys.stdout)
+ f = writer("AS%s" % asn, family=ns.family, f=sys.stdout)
# Print all matching networks
for n in db.search_networks(asns=[asn], family=ns.family):
for country_code in ns.country_code:
# Open standard output
- f = writer(country_code, f=sys.stdout)
+ f = writer(country_code, family=ns.family, f=sys.stdout)
# Print all matching networks
for n in db.search_networks(country_codes=[country_code], family=ns.family):
raise ValueError(_("You must at least pass one flag"))
writer = self.__get_output_formatter(ns)
- f = writer("custom", f=sys.stdout)
+ f = writer("custom", family=ns.family, f=sys.stdout)
for n in db.search_networks(flags=flags, family=ns.family):
f.write(n)
def handle_list_bogons(self, db, ns):
writer = self.__get_output_formatter(ns)
- f = writer("bogons", f=sys.stdout)
+ f = writer("bogons", family=ns.family, f=sys.stdout)
for n in db.list_bogons(family=ns.family):
f.write(n)
families = [ socket.AF_INET6, socket.AF_INET ]
for object in ns.objects:
- m = re.match("^AS(\d+)$", object)
+ m = re.match(r"^AS(\d+)$", object)
if m:
object = int(m.group(1))
static int loc_database_write_networks(struct loc_writer* writer,
struct loc_database_header_v1* header, off_t* offset, FILE* f) {
+ int r;
+
// Write the network tree
DEBUG(writer->ctx, "Network tree starts at %jd bytes\n", (intmax_t)*offset);
header->network_tree_offset = htobe32(*offset);
TAILQ_HEAD(network_t, network) networks;
TAILQ_INIT(&networks);
+ // Cleanup the tree before writing it
+ r = loc_network_tree_cleanup(writer->networks);
+ if (r)
+ return r;
+
// Add root
struct loc_network_tree_node* root = loc_network_tree_get_root(writer->networks);
node = make_node(root);
TAILQ_REMOVE(&networks, nw, networks);
// Prepare what we are writing to disk
- int r = loc_network_to_database_v1(nw->network, &db_network);
+ r = loc_network_to_database_v1(nw->network, &db_network);
if (r)
return r;