#!/usr/bin/python
+import logging
import os.path
+import pakfire
+
+log = logging.getLogger("repositories")
+log.propagate = 1
+
from . import base
from . import logs
+from .constants import *
from .decorators import *
class Repositories(base.Object):
	def __iter__(self):
+		# Soft-deleted repositories are hidden from iteration
		repositories = self._get_repositories("SELECT * FROM repositories \
-			ORDER BY distro_id, name")
+			WHERE deleted IS FALSE ORDER BY distro_id, name")
		return iter(repositories)
+	def create(self, distro, name, description):
+		# RETURNING * lets us build the Repository object without a second query
+		return self._get_repository("INSERT INTO repositories(distro_id, name, description) \
+			VALUES(%s, %s, %s) RETURNING *", distro.id, name, description)
+
	def get_by_id(self, repo_id):
		return self._get_repository("SELECT * FROM repositories \
			WHERE id = %s", repo_id)
- def get_needs_update(self, limit=None):
- query = "SELECT id FROM repositories WHERE needs_update = 'Y'"
- query += " ORDER BY last_update ASC"
-
- # Append limit if any
- if limit:
- query += " LIMIT %d" % limit
-
- repos = self.db.query(query)
-
- return [Repository(self.pakfire, r.id) for r in repos]
-
def get_history(self, limit=None, offset=None, build=None, repo=None, user=None):
query = "SELECT * FROM repositories_history"
args = []
return entries
+	def remaster(self):
+		"""
+			Remasters all repositories
+		"""
+		for repo in self:
+			# Skip all repositories that don't need an update
+			if not repo.needs_update:
+				log.debug("Repository %s does not need an update" % repo)
+				continue
-class Repository(base.Object):
-	def __init__(self, pakfire, id, data=None):
-		base.Object.__init__(self, pakfire)
-		self.id = id
+			# One transaction per repository, so a failure in one
+			# repository does not roll back work done for the others
+			with self.db.transaction():
+				repo.remaster()
-		# Cache.
-		self._data = data
-		self._next = None
-		self._prev = None
-		self._key = None
-		self._distro = None
+	def cleanup(self):
+		"""
+			Cleans up all repositories
+		"""
+		for repo in self:
+			with self.db.transaction():
+				repo.cleanup()
-	@property
-	def data(self):
-		if self._data is None:
-			self._data = self.db.get("SELECT * FROM repositories WHERE id = %s", self.id)
-		return self._data
+class Repository(base.DataObject):
+	table = "repositories"
-	def __cmp__(self, other):
-		if other is None:
-			return 1
+	def __eq__(self, other):
+		if isinstance(other, self.__class__):
+			return self.id == other.id
+		# NOTE(review): implicitly returns None (falsy) instead of
+		# NotImplemented for foreign types - confirm this is intended
-		if self.id == other.id:
-			return 0
+	def __lt__(self, other):
+		# A repository sorts before the repository that is its parent
+		if isinstance(other, self.__class__):
+			return self.parent_id == other.id
-		elif self.id == other.parent_id:
-			return 1
+	def __iter__(self):
+		# Iterate over all builds currently contained in this repository
+		builds = self.backend.builds._get_builds("SELECT builds.* FROM repositories_builds \
+			LEFT JOIN builds ON repositories_builds.build_id = builds.id \
+			WHERE repositories_builds.repo_id = %s", self.id)
-		elif self.parent_id == other.id:
-			return -1
+		return iter(builds)
-		return 1
+	def __len__(self):
+		# Number of builds in the repository (counted in the database)
+		res = self.db.get("SELECT COUNT(*) AS len FROM repositories_builds \
+			WHERE repo_id = %s", self.id)
-	def next(self):
-		if self._next is None:
-			repo = self.db.get("SELECT id FROM repositories \
-				WHERE parent_id = %s LIMIT 1", self.id)
+		return res.len
-		if not repo:
-			return
+	def __nonzero__(self):
+		# An empty repository must still be truthy despite __len__ == 0
+		return True
-		self._next = Repository(self.pakfire, repo.id)
+	@lazy_property
+	def next(self):
+		# NOTE(review): the old code used LIMIT 1; this assumes at most
+		# one child repository per parent - confirm against the schema
+		return self.backend.repos._get_repository("SELECT * FROM repositories \
+			WHERE parent_id = %s", self.id)
-		return self._next
+	@lazy_property
+	def parent(self):
+		# The repository this one was branched from (None for roots)
+		if self.data.parent_id:
+			return self.backend.repos._get_repository("SELECT * FROM repositories \
+				WHERE id = %s", self.data.parent_id)
-	def prev(self):
-		if not self.parent_id:
-			return
+	@lazy_property
+	def distro(self):
+		return self.backend.distros.get_by_id(self.data.distro_id)
-		if self._prev is None:
-			self._prev = Repository(self.pakfire, self.parent_id)
+	def set_priority(self, priority):
+		# Persisted straight to the database row
+		self._set_attribute("priority", priority)
-		return self._prev
+	priority = property(lambda s: s.data.priority, set_priority)
-	@property
-	def parent(self):
-		return self.prev()
+	def get_user(self):
+		# The owning user, if any (user_id may be NULL)
+		if self.data.user_id:
+			return self.backend.users.get_by_id(self.data.user_id)
-	@classmethod
-	def create(cls, pakfire, distro, name, description):
-		id = pakfire.db.execute("INSERT INTO repositories(distro_id, name, description)"
-			" VALUES(%s, %s, %s)", distro.id, name, description)
+	def set_user(self, user):
+		self._set_attribute("user_id", user.id)
-		return cls(pakfire, id)
-
-	@lazy_property
-	def distro(self):
-		return self.backend.distros.get_by_id(self.data.distro_id)
+	user = property(get_user, set_user)
@property
def info(self):
}
	@property
-	def url(self):
-		url = os.path.join(
-			self.settings.get("repository_baseurl", "http://pakfire.ipfire.org/repositories/"),
+	def basepath(self):
+		# Relative path of this repository: <distro>/<repo>
+		return os.path.join(
			self.distro.identifier,
			self.identifier,
-			"%{arch}"
		)
-		return url
+	@property
+	def path(self):
+		# Absolute location of this repository on the local filesystem
+		return os.path.join(REPOS_DIR, self.basepath)
+
+	@property
+	def url(self):
+		# Public download URL; built with "/" join, not os.path.join,
+		# so it stays a URL on any platform
+		return "/".join((
+			self.settings.get("baseurl", "https://pakfire.ipfire.org"),
+			"repositories",
+			self.basepath,
+		))
	@property
	def mirrorlist(self):
-		url = os.path.join(
-			self.settings.get("mirrorlist_baseurl", "https://pakfire.ipfire.org/"),
+		# %{arch} is substituted by the pakfire client, not here
+		return "/".join((
+			self.settings.get("baseurl", "https://pakfire.ipfire.org"),
			"distro", self.distro.identifier,
			"repo", self.identifier,
			"mirrorlist?arch=%{arch}"
-		)
-
-		return url
-
-	def get_conf(self):
-		prioritymap = {
-			"stable" : 500,
-			"unstable" : 200,
-			"testing" : 100,
-		}
-
-		try:
-			priority = prioritymap[self.type]
-		except KeyError:
-			priority = None
+		))
+	def get_conf(self, local=False):
+		# local=True points baseurl at the local filesystem path
+		# instead of the public URL, and suppresses the mirrorlist
		lines = [
			"[repo:%s]" % self.identifier,
			"description = %s - %s" % (self.distro.name, self.summary),
			"enabled = 1",
-			"baseurl = %s" % self.url,
-			"mirrors = %s" % self.mirrorlist,
+			"baseurl = %s/%%{arch}" % (self.path if local else self.url),
		]
-		if priority:
-			lines.append("priority = %s" % priority)
+		if self.mirrored and not local:
+			lines.append("mirrors = %s" % self.mirrorlist)
+
+		if self.priority:
+			lines.append("priority = %s" % self.priority)
		return "\n".join(lines)
def parent_id(self):
return self.data.parent_id
-	@property
+	@lazy_property
	def key(self):
+		# Signing key for this repository; None when no key is assigned
		if not self.data.key_id:
			return
-		if self._key is None:
-			self._key = self.pakfire.keys.get_by_id(self.data.key_id)
-			assert self._key
-
-		return self._key
+		# NOTE(review): still uses self.pakfire while the other new
+		# accessors use self.backend - confirm both resolve identically
+		return self.pakfire.keys.get_by_id(self.data.key_id)
	@property
	def arches(self):
-		return self.distro.arches
+		# Repositories additionally carry a source ("src") tree
+		return self.distro.arches + ["src"]
-	@property
-	def mirrored(self):
-		return self.data.mirrored == "Y"
+	def set_mirrored(self, mirrored):
+		# The column is a boolean now; no more "Y"/"N" strings
+		self._set_attribute("mirrored", mirrored)
-	def get_enabled_for_builds(self):
-		return self.data.enabled_for_builds == "Y"
+	mirrored = property(lambda s: s.data.mirrored, set_mirrored)
	def set_enabled_for_builds(self, state):
-		if state:
-			state = "Y"
-		else:
-			state = "N"
-
-		self.db.execute("UPDATE repositories SET enabled_for_builds = %s WHERE id = %s",
-			state, self.id)
+		# _set_attribute updates both the row and the cached data
+		self._set_attribute("enabled_for_builds", state)
-		if self._data:
-			self._data["enabled_for_builds"] = state
-
-	enabled_for_builds = property(get_enabled_for_builds, set_enabled_for_builds)
+	enabled_for_builds = property(lambda s: s.data.enabled_for_builds, set_enabled_for_builds)
@property
def score_needed(self):
def time_max(self):
return self.data.time_max
+	def set_update_forced(self, update_forced):
+		self._set_attribute("update_forced", update_forced)
+
+	# When set, remaster() regenerates the index even without new packages
+	update_forced = property(lambda s: s.data.update_forced, set_update_forced)
+
def _log_build(self, action, build, from_repo=None, to_repo=None, user=None):
user_id = None
if user:
self.db.execute("DELETE FROM repositories_builds \
WHERE repo_id = %s AND build_id = %s", self.id, build.id)
+ # Force regenerating the index
+ self.update_forced = True
+
if log:
self._log_build("removed", build, from_repo=self, user=user)
self.db.execute("UPDATE repositories_builds SET repo_id = %s, time_added = NOW() \
WHERE repo_id = %s AND build_id = %s", to_repo.id, self.id, build.id)
+ # Force regenerating the index
+ self.update_forced = True
+
# Update bug status.
build._update_bugs_helper(to_repo)
self._log_build("moved", build, from_repo=self, to_repo=to_repo,
user=user)
+	# build_count() has been superseded by __len__ above
-	def build_count(self):
-		query = self.db.get("SELECT COUNT(*) AS count FROM repositories_builds \
-			WHERE repo_id = %s", self.id)
-
-		if query:
-			return query.count
-
def get_builds(self, limit=None, offset=None):
query = "SELECT build_id AS id FROM repositories_builds \
WHERE repo_id = %s ORDER BY time_added DESC"
return _builds
-	def _get_packages(self, arch):
-		if arch.name == "src":
-			pkgs = self.db.query("SELECT packages.id AS id, packages.path AS path FROM packages \
-				JOIN builds ON builds.pkg_id = packages.id \
-				JOIN repositories_builds ON builds.id = repositories_builds.build_id \
-				WHERE packages.arch = %s AND repositories_builds.repo_id = %s",
-				arch.name, self.id)
-
-		else:
-			noarch = self.pakfire.arches.get_by_name("noarch")
-			assert noarch
-
-			pkgs = self.db.query("SELECT packages.id AS id, packages.path AS path FROM packages \
-				JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
-				JOIN jobs ON jobs_packages.job_id = jobs.id \
-				JOIN builds ON builds.id = jobs.build_id \
-				JOIN repositories_builds ON builds.id = repositories_builds.build_id \
-				WHERE (jobs.arch = %s OR jobs.arch = %s) AND \
-				repositories_builds.repo_id = %s",
-				arch.name, noarch.name, self.id)
+	def get_builds_by_name(self, name):
+		"""
+			Returns an ordered list of all builds that match this name
+		"""
+		builds = self.backend.builds._get_builds("SELECT builds.* FROM repositories_builds \
+			LEFT JOIN builds ON repositories_builds.build_id = builds.id \
+			LEFT JOIN packages ON builds.pkg_id = packages.id \
+			WHERE repositories_builds.repo_id = %s AND packages.name = %s", self.id, name)
-		return pkgs
+		# Ordering comes from Build's comparison methods
+		return sorted(builds)
	def get_packages(self, arch):
+		# arch is now a plain string ("src", "noarch", "x86_64", ...),
+		# no longer an arch object with a .name attribute
-		pkgs = [self.pakfire.packages.get_by_id(p.id) for p in self._get_packages(arch)]
-		pkgs.sort()
-
-		return pkgs
-
-	def get_paths(self, arch):
-		paths = [p.path for p in self._get_packages(arch)]
-		paths.sort()
-
-		return paths
+		if arch == "src":
+			# Source packages hang directly off the build
+			return self.backend.packages._get_packages("SELECT packages.* FROM repositories_builds \
+				LEFT JOIN builds ON repositories_builds.build_id = builds.id \
+				LEFT JOIN packages ON builds.pkg_id = packages.id \
+				WHERE repositories_builds.repo_id = %s", self.id)
+
+		# Binary packages come via the jobs of the build;
+		# noarch packages are included in every architecture
+		return self.backend.packages._get_packages("SELECT packages.* FROM repositories_builds \
+			LEFT JOIN builds ON repositories_builds.build_id = builds.id \
+			LEFT JOIN jobs ON builds.id = jobs.build_id \
+			LEFT JOIN jobs_packages ON jobs.id = jobs_packages.job_id \
+			LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
+			WHERE repositories_builds.repo_id = %s \
+				AND (jobs.arch = %s OR jobs.arch = %s) \
+				AND (packages.arch = %s OR packages.arch = %s)",
+			self.id, arch, "noarch", arch, "noarch")
	@property
-	def packages(self):
-		return self.get_packages()
-
-	def get_unpushed_builds(self):
-		query = self.db.query("SELECT build_id FROM repositories_builds \
-			WHERE repo_id = %s AND \
-			time_added > (SELECT last_update FROM repositories WHERE id = %s)",
-			self.id, self.id)
-
-		ret = []
-		for row in query:
-			b = self.pakfire.builds.get_by_id(row.build_id)
-			ret.append(b)
-
-		return ret
+	def unpushed_builds(self):
+		# Builds added since the index was last regenerated
+		# NOTE(review): comparison changed from > to >= - confirm builds
+		# added exactly at last_update should count as unpushed
+		return self.backend.builds._get_builds("SELECT builds.* FROM repositories \
+			LEFT JOIN repositories_builds ON repositories.id = repositories_builds.repo_id \
+			LEFT JOIN builds ON repositories_builds.build_id = builds.id \
+			WHERE repositories.id = %s \
+				AND repositories_builds.time_added >= repositories.last_update", self.id)
	def get_obsolete_builds(self):
-		#query = self.db.query("SELECT build_id AS id FROM repositories_builds \
-		#	JOIN builds ON repositories.build_id = builds.id \
-		#	WHERE repositories_builds.repo_id = %s AND builds.state = 'obsolete'",
-		#	self.id)
-		#
-		#ret = []
-		#for row in query:
-		#	b = builds.Build(self.pakfire, row.id)
-		#	ret.append(b)
-		#
-		#return ret
		return self.pakfire.builds.get_obsolete(self)
+	@property
	def needs_update(self):
-		if self.get_unpushed_builds:
+		# Fixes the old bug of testing the bound method (always truthy)
+		if self.unpushed_builds:
			return True
+		# NOTE(review): update_forced is not considered here, yet
+		# Repositories.remaster() gates on needs_update - confirm a forced
+		# update without unpushed builds still triggers a remaster
		return False
self.db.execute("UPDATE repositories SET last_update = NOW() \
WHERE id = %s", self.id)
+ # Reset forced update flag
+ self.update_forced = False
+
+	def remaster(self):
+		# Regenerate the on-disk repository trees: copy in any missing
+		# packages, then rebuild the signed index per architecture
+		log.info("Going to update repository %s..." % self.name)
+
+		for arch in self.arches:
+			changed = False
+
+			repo_path = os.path.join(self.path, arch)
+			log.debug(" Path: %s" % repo_path)
+
+			if not os.path.exists(repo_path):
+				os.makedirs(repo_path)
+
+			# Get all packages that are to be included in this repository
+			packages = []
+			for p in self.get_packages(arch):
+				path = os.path.join(repo_path, p.filename)
+				packages.append(path)
+
+				# Nothing to do if the package already exists
+				if os.path.exists(path):
+					continue
+
+				# Copy the package into the repository
+				log.info("Adding %s..." % p)
+				p.copy(repo_path)
+
+				# XXX need to sign the new package here
+
+				# The repository has been changed
+				changed = True
+
+			# No need to regenerate the index if the repository hasn't changed
+			if not changed and not self.update_forced:
+				continue
+
+			# Find the key to sign the package.
+			key_id = None
+			if self.key:
+				key_id = self.key.fingerprint
+
+			# Create package index.
+			# NOTE(review): rebinding p here shadows the package loop
+			# variable above - harmless but worth renaming
+			p = pakfire.PakfireServer(arch=arch)
+			p.repo_create(repo_path, packages,
+				name="%s - %s.%s" % (self.distro.name, self.name, arch),
+				key_id=key_id)
+
+		# Update the timestamp when we started at last
+		self.updated()
+
+	def cleanup(self):
+		# Delete any files on disk that no longer belong to a build
+		# in this repository
+		log.info("Cleaning up repository %s..." % self.name)
+
+		for arch in self.arches:
+			repo_path = os.path.join(self.path, arch)
+
+			# NOTE(review): os.listdir() raises OSError if repo_path does
+			# not exist yet (repository never remastered) - confirm callers
+			# only run cleanup after remaster
+			# Get a list of all files in the repository directory right now
+			filelist = [e for e in os.listdir(repo_path)
+				if os.path.isfile(os.path.join(repo_path, e))]
+
+			# Get a list of all packages that should be in the repository
+			# and remove them from the filelist
+			for p in self.get_packages(arch):
+				try:
+					filelist.remove(p.filename)
+				except ValueError:
+					pass
+
+			# For any files that do not belong into the repository
+			# any more, we will just delete them
+			for filename in filelist:
+				path = os.path.join(repo_path, filename)
+				self.backend.delete_file(path)
+
+
def get_history(self, **kwargs):
kwargs.update({
"repo" : self,
	def get_build_times(self):
+		# Sum the wall-clock build time per architecture
		times = []
		for arch in self.arches:
+			# Source trees have no build jobs
+			if arch == "src":
+				continue
+
			time = self.db.get("SELECT SUM(jobs.time_finished - jobs.time_started) AS time FROM jobs \
				JOIN builds ON builds.id = jobs.build_id \
				JOIN repositories_builds ON builds.id = repositories_builds.build_id \
				WHERE (jobs.arch = %s OR jobs.arch = %s) AND \
-				jobs.type = 'build' AND \
-				repositories_builds.repo_id = %s", arch, "noarch", self.id)
+				jobs.test IS FALSE AND repositories_builds.repo_id = %s", arch, "noarch", self.id)
-			times.append((arch, time.time.total_seconds()))
+			# SUM() yields NULL when no jobs matched; report 0 instead
+			times.append((arch, time.time.total_seconds() if time.time else 0))
		return times
-class RepositoryAux(base.Object):
-	def __init__(self, pakfire, id):
-		base.Object.__init__(self, pakfire)
-
-		self.id = id
-
-		# Cache.
-		self._data = None
-		self._distro = None
-
-	@property
-	def data(self):
-		if self._data is None:
-			self._data = self.db.get("SELECT * FROM repositories_aux WHERE id = %s", self.id)
-			assert self._data
-
-		return self._data
+# Auxiliary repositories; caching/row-loading boilerplate
+# now comes from base.DataObject
+class RepositoryAux(base.DataObject):
+	table = "repositories_aux"
@property
def name(self):
	@property
	def distro(self):
-		if self._distro is None:
-			self._distro = self.pakfire.distros.get_by_id(self.data.distro_id)
-			assert self._distro
-
-		return self._distro
+		# NOTE(review): no longer cached and drops the assert - confirm
+		# distro_id always resolves to an existing distribution
+		return self.pakfire.distros.get_by_id(self.data.distro_id)
- def get_conf(self):
+ def get_conf(self, local=False):
lines = [
"[repo:%s]" % self.identifier,
"description = %s - %s" % (self.distro.name, self.name),