--- /dev/null
+/build
+/tmp
+*.py[co]
--- /dev/null
+
+# This is the main configuration file for pakfire.
+
+[main]
+logfile = /var/log/pakfire.log
+
+
+[distro]
+name = IPFire
+version = 3.0-prealpha2
+slogan = Gluttony
+
+vendor = ipfire
+
--- /dev/null
+;[ipfire]
+;description = IPFire Main Repository
+;
+;url = http://mirror0.ipfire.org/pakfire3/$name/$arch
+;
+;gpgkey = /not/yet/existant
+
+[ipfire-development]
+description = IPFire Development Repository
+
+#url = http://mirror0.ipfire.org/pakfire3/$name/$arch
+url = file:///ipfire-3.x/build/packages/i686
+
+gpgkey = /not/yet/existant
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import os
+import random
+import string
+
+import builder
+import config
+import database
+import depsolve
+import distro
+import logger
+import packages
+import plugins
+import repository
+import transaction
+
+from constants import *
+from errors import BuildError
+from i18n import _
+
+__version__ = 0.1
+
+
+class Pakfire(object):
+ def __init__(self, path="/tmp/pakfire", builder=False, configs=[]):
+ # The path where we are operating in
+ self.path = path
+
+ # Save if we are in the builder mode
+ self.builder = builder
+
+ if self.builder:
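+ # In builder mode, use a randomly named directory below BUILD_ROOT
+ # as the chroot path.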
+ rnd = random.sample(string.lowercase + string.digits, 12)
+ self.path = os.path.join(BUILD_ROOT, "".join(rnd))
+
+ self.debug = False
+
+ # Read configuration file(s)
+ self.config = config.Config(pakfire=self)
+ for filename in configs:
+ self.config.read(filename)
+
+ # Setup the logger
+ logger.setup_logging(self.config)
+ self.config.dump()
+
+ # Load plugins
+ self.plugins = plugins.Plugins(pakfire=self)
+
+ # Get more information about the distribution we are running
+ # or building
+ self.distro = distro.Distribution(pakfire=self)
+
+ # Load all repositories
+ self.repos = repository.Repositories(pakfire=self)
+
+ # Run plugins that implement an initialization method.
+ self.plugins.run("init")
+
+ # XXX disable repositories if passed on command line
+
+ def check_build_mode(self):
+ """
+ Check if we are running in build mode.
+ Otherwise, raise an exception.
+ """
+ if not self.builder:
+ raise BuildError, "Cannot build when not in build mode."
+
+ def check_host_arch(self, arch):
+ """
+ Check if we can build for arch.
+ """
+
+ # If no arch was given on the command line we build for our
+ # own arch which should always work.
+ if not arch:
+ return True
+
+ if not self.distro.host_supports_arch(arch):
+ raise BuildError, "Cannot build for the target architecture: %s" % arch
+
+ return True
+
+ def build(self, pkg, arch=None, resultdir=None):
+ self.check_build_mode()
+ self.check_host_arch(arch)
+
+ b = builder.Builder(pakfire=self, pkg=pkg)
+ b.extract()
+
+ if not resultdir:
+ resultdir = self.config.get("resultdir")
+
+ try:
+ b.build()
+ b.copy_result(resultdir)
+ finally:
+ b.cleanup()
+
+ def shell(self, pkg, arch=None):
+ self.check_build_mode()
+ self.check_host_arch(arch)
+
+ b = builder.Builder(pakfire=self, pkg=pkg)
+ b.extract(SHELL_PACKAGES)
+
+ try:
+ b.shell()
+ finally:
+ b.cleanup()
+
+ def dist(self, pkg, resultdir=None):
+ self.check_build_mode()
+
+ b = builder.Builder(pakfire=self, pkg=pkg)
+ b.extract(build_deps=False)
+
+ if not resultdir:
+ resultdir = self.config.get("resultdir")
+
+ try:
+ b.dist()
+ b.copy_result(resultdir)
+ finally:
+ b.cleanup()
+
+ def install(self, requires):
+ ds = depsolve.DependencySet(pakfire=self)
+
+ for req in requires:
+ if isinstance(req, packages.BinaryPackage):
+ ds.add_package(req)
+ else:
+ ds.add_requires(req)
+
+ ds.resolve()
+
+ ts = transaction.TransactionSet(self, ds)
+ ts.dump()
+
+ # ask_user lives in the cli module; import it here to avoid a circular import.
+ from cli import ask_user
+ ret = ask_user(_("Is this okay?"))
+ if not ret:
+ return
+
+ ts.run()
+
--- /dev/null
+#!/usr/bin/python
+
+class Singleton(type):
+ """
+ A class for using the singleton pattern
+ """
+ def __init__(cls, name, bases, dict):
+ super(Singleton, cls).__init__(name, bases, dict)
+ cls.instance = None
+
+ def __call__(cls, *args, **kw):
+ if cls.instance is None:
+ cls.instance = super(Singleton, cls).__call__(*args, **kw)
+
+ return cls.instance
--- /dev/null
+#!/usr/bin/python
+
+import fcntl
+import grp
+import logging
+import os
+import re
+import shutil
+import stat
+import time
+
+import depsolve
+import packages
+import transaction
+import util
+
+from constants import *
+from errors import BuildRootLocked
+
+
+class Builder(object):
+ # The version of the kernel this machine is running.
+ kernel_version = os.uname()[2]
+
+ def __init__(self, pakfire, pkg):
+ self.pakfire = pakfire
+ self.pkg = pkg
+
+ self.settings = {
+ "enable_loop_devices" : True,
+ }
+
+ self.buildroot = "/buildroot"
+
+ # Lock the buildroot
+ self._lock = None
+ self.lock()
+
+ # Initialize the environment
+ self.prepare()
+
+ @property
+ def path(self):
+ return self.pakfire.path
+
+ def lock(self):
+ filename = os.path.join(self.path, ".lock")
+
+ try:
+ self._lock = open(filename, "a+")
+ except IOError, e:
+ return 0
+
+ try:
+ fcntl.lockf(self._lock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError, e:
+ raise BuildRootLocked, "Buildroot is locked"
+
+ return 1
+
+ def unlock(self):
+ if self._lock:
+ self._lock.close()
+ self._lock = None
+
+ def copyin(self, file_out, file_in):
+ if file_in.startswith("/"):
+ file_in = file_in[1:]
+
+ file_in = self.chrootPath(file_in)
+
+ #if not os.path.exists(file_out):
+ # return
+
+ dir_in = os.path.dirname(file_in)
+ if not os.path.exists(dir_in):
+ os.makedirs(dir_in)
+
+ logging.debug("%s --> %s" % (file_out, file_in))
+
+ shutil.copy2(file_out, file_in)
+
+ def copyout(self, file_in, file_out):
+ if file_in.startswith("/"):
+ file_in = file_in[1:]
+
+ file_in = self.chrootPath(file_in)
+
+ #if not os.path.exists(file_in):
+ # return
+
+ dir_out = os.path.dirname(file_out)
+ if not os.path.exists(dir_out):
+ os.makedirs(dir_out)
+
+ logging.debug("%s --> %s" % (file_in, file_out))
+
+ shutil.copy2(file_in, file_out)
+
+ def copy_result(self, resultdir):
+ dir_in = self.chrootPath("result")
+
+ for dir, subdirs, files in os.walk(dir_in):
+ basename = os.path.basename(dir)
+ dir = dir[len(self.chrootPath()):]
+ for file in files:
+ file_in = os.path.join(dir, file)
+
+ file_out = os.path.join(
+ resultdir,
+ basename,
+ file,
+ )
+
+ self.copyout(file_in, file_out)
+
+ def extract(self, requires=[], build_deps=True):
+ """
+ Build a dependency set and extract all required packages
+ into the environment.
+ """
+ ds = depsolve.DependencySet(self.pakfire)
+ for p in BUILD_PACKAGES + requires:
+ ds.add_requires(p)
+ ds.resolve()
+
+ # Get build dependencies from source package.
+ if isinstance(self.pkg, packages.SourcePackage):
+ for req in self.pkg.requires:
+ ds.add_requires(req)
+
+ ts = transaction.TransactionSet(self.pakfire, ds)
+ ts.dump()
+ ts.run()
+
+ # Copy the makefile and load source tarballs.
+ if isinstance(self.pkg, packages.Makefile):
+ self.pkg.extract(self)
+
+ # If we have a makefile, we can only get the build dependencies
+ # after we have extracted all the rest.
+ if build_deps and isinstance(self.pkg, packages.Makefile):
+ requires = self.make_requires()
+ if not requires:
+ return
+
+ ds = depsolve.DependencySet(self.pakfire)
+ for r in requires:
+ ds.add_requires(r)
+ ds.resolve()
+
+ ts = transaction.TransactionSet(self.pakfire, ds)
+ ts.dump()
+ ts.run()
+
+ @property
+ def log(self):
+ # XXX for now, return the root logger
+ return logging.getLogger()
+
+ def chrootPath(self, *args):
+ # Remove all leading slashes
+ _args = []
+ for arg in args:
+ if arg.startswith("/"):
+ arg = arg[1:]
+ _args.append(arg)
+ args = _args
+
+ ret = os.path.join(self.path, *args)
+ ret = ret.replace("//", "/")
+
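+ # The computed path must never point outside of the chroot.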
+ assert ret.startswith(self.path)
+
+ return ret
+
+ def prepare(self):
+ # Create directory.
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+
+ # Create important directories.
+ dirs = [
+ "build",
+ self.buildroot,
+ "dev",
+ "dev/pts",
+ "dev/shm",
+ "etc",
+ "proc",
+ "result",
+ "sys",
+ "tmp",
+ "usr/src",
+ ]
+ for dir in dirs:
+ dir = self.chrootPath(dir)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+
+ self._prepare_dev()
+ self._prepare_users()
+ self._prepare_dns()
+
+ def _prepare_dev(self):
+ prevMask = os.umask(0000)
+
+ nodes = [
+ ("dev/null", stat.S_IFCHR | 0666, os.makedev(1, 3)),
+ ("dev/full", stat.S_IFCHR | 0666, os.makedev(1, 7)),
+ ("dev/zero", stat.S_IFCHR | 0666, os.makedev(1, 5)),
+ ("dev/random", stat.S_IFCHR | 0666, os.makedev(1, 8)),
+ ("dev/urandom", stat.S_IFCHR | 0444, os.makedev(1, 9)),
+ ("dev/tty", stat.S_IFCHR | 0666, os.makedev(5, 0)),
+ ("dev/console", stat.S_IFCHR | 0600, os.makedev(5, 1)),
+ ]
+
+ # If we need loop devices (which are optional) we create them here.
+ if self.settings["enable_loop_devices"]:
+ for i in range(0, 7):
+ nodes.append(("dev/loop%d" % i, stat.S_IFBLK | 0660, os.makedev(7, i)))
+
+ # Create all the nodes.
+ for node in nodes:
+ self._create_node(*node)
+
+ os.symlink("/proc/self/fd/0", self.chrootPath("dev", "stdin"))
+ os.symlink("/proc/self/fd/1", self.chrootPath("dev", "stdout"))
+ os.symlink("/proc/self/fd/2", self.chrootPath("dev", "stderr"))
+ os.symlink("/proc/self/fd", self.chrootPath("dev", "fd"))
+
+ # make device node for el4 and el5
+ if self.kernel_version < "2.6.19":
+ self._create_node("dev/ptmx", stat.S_IFCHR | 0666, os.makedev(5, 2))
+ else:
+ os.symlink("/dev/pts/ptmx", self.chrootPath("dev", "ptmx"))
+
+ os.umask(prevMask)
+
+ def _prepare_users(self):
+ f = open(self.chrootPath("etc", "passwd"), "w")
+ f.write("root:x:0:0:root:/root:/bin/bash\n")
+ f.write("nobody:x:99:99:Nobody:/:/sbin/nologin\n")
+ f.close()
+
+ f = open(self.chrootPath("etc", "group"), "w")
+ f.write("root:x:0:root\n")
+ f.write("nobody:x:99:\n")
+ f.close()
+
+ def _prepare_dns(self):
+ # XXX to be replaced
+ # maybe we can copyin /etc/resolv.conf and /etc/hosts
+ nameservers = []
+ f = open("/etc/resolv.conf")
+ for line in f.readlines():
+ if line.startswith("nameserver"):
+ nameservers.append(line.split(" ")[-1].strip())
+ f.close()
+
+ logging.debug("Using nameservers: %s" % nameservers)
+
+ f = open(self.chrootPath("etc", "resolv.conf"), "w")
+ for nameserver in nameservers:
+ f.write("nameserver %s" % nameserver)
+ f.close()
+
+ logging.debug("Creating record for localhost")
+ f = open(self.chrootPath("etc", "hosts"), "w")
+ f.write("127.0.0.1 localhost\n")
+ f.close()
+
+ def _create_node(self, filename, mode, device):
+ logging.debug("Create node: %s (%s)" % (filename, mode))
+
+ filename = self.chrootPath(filename)
+
+ # Create parent directory if it is missing.
+ dirname = os.path.dirname(filename)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ os.mknod(filename, mode, device)
+
+ def cleanup(self):
+ logging.debug("Cleanup environment %s" % self.path)
+
+ if os.path.exists(self.path):
+ util.rm(self.path)
+
+ def _mountall(self):
+ self.log.debug("Mounting environment")
+ for cmd, mountpoint in self.mountpoints:
+ cmd = "%s %s" % (cmd, self.chrootPath(mountpoint))
+ util.do(cmd, shell=True)
+
+ def _umountall(self):
+ self.log.debug("Umounting environment")
+ for cmd, mountpoint in self.mountpoints:
+ cmd = "umount -n %s" % self.chrootPath(mountpoint)
+ util.do(cmd, raiseExc=0, shell=True)
+
+ @property
+ def mountpoints(self):
+ ret = [
+ ("mount -n -t proc pakfire_chroot_proc", "proc"),
+ ("mount -n -t sysfs pakfire_chroot_sysfs", "sys"),
+ ]
+
+ mountopt = "gid=%d,mode=0620,ptmxmode=0666" % grp.getgrnam("tty").gr_gid
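+ # Kernels >= 2.6.29 support private devpts instances ("newinstance")
+ # for the chroot.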
+ if self.kernel_version >= "2.6.29":
+ mountopt += ",newinstance"
+
+ ret.extend([
+ ("mount -n -t devpts -o %s pakfire_chroot_devpts" % mountopt, "dev/pts"),
+ ("mount -n -t tmpfs pakfire_chroot_shmfs", "dev/shm"),
+ ])
+
+ return ret
+
+ @property
+ def environ(self):
+ env = {
+ "BUILDROOT" : self.buildroot,
+
+ # XXX I want to get rid of these too and invoke a login shell
+ "HOME" : "/root",
+ "PATH" : "/sbin:/bin:/usr/sbin:/usr/bin",
+ }
+
+ # Inherit environment from distro
+ env.update(self.pakfire.distro.environ)
+
+ # XXX what do we need else?
+
+ return env
+
+ def do(self, command, shell=True, personality=None, *args, **kwargs):
+ ret = None
+ try:
+ # Environment variables
+ env = self.environ
+
+ if kwargs.has_key("env"):
+ env.update(kwargs.pop("env"))
+
+ self._mountall()
+
+ if not kwargs.has_key("chrootPath"):
+ kwargs["chrootPath"] = self.chrootPath()
+
+ ret = util.do(
+ command,
+ personality=personality,
+ shell=shell,
+ env=env,
+ logger=self.log,
+ *args,
+ **kwargs
+ )
+
+ finally:
+ self._umountall()
+
+ return ret
+
+ def make(self, *args, **kwargs):
+ # XXX need to get rid of this
+ env = { "PKGROOT" : "/usr/lib/buildsystem" }
+ try:
+ kwargs["env"].update(env)
+ except KeyError:
+ kwargs["env"] = env
+
+ return self.do("make -f /build/%s %s" % \
+ (os.path.basename(self.pkg.filename), " ".join(args)),
+ shell=True, **kwargs)
+
+ @property
+ def make_info(self):
+ if not hasattr(self, "_make_info"):
+ info = {}
+
+ output = self.make("buildinfo", returnOutput=True)
+
+ for line in output.splitlines():
+ # XXX temporarily
+ if not line:
+ break
+
+ m = re.match(r"^(\w+)=(.*)$", line)
+ if not m:
+ continue
+
+ info[m.group(1)] = m.group(2).strip("\"")
+
+ self._make_info = info
+
+ return self._make_info
+
+ @property
+ def packages(self):
+ if hasattr(self, "_packages"):
+ return self._packages
+
+ pkgs = []
+ output = self.make("packageinfo", returnOutput=True)
+
+ pkg = {}
+ for line in output.splitlines():
+ if not line:
+ pkgs.append(pkg)
+ pkg = {}
+
+ m = re.match(r"^(\w+)=(.*)$", line)
+ if not m:
+ continue
+
+ k, v = m.groups()
+ pkg[k] = v.strip("\"")
+
+ self._packages = []
+ for pkg in pkgs:
+ pkg = packages.VirtualPackage(pkg)
+ self._packages.append(pkg)
+
+ return self._packages
+
+ def make_requires(self):
+ return self.make_info.get("PKG_BUILD_DEPS", "").split()
+
+ def make_sources(self):
+ return self.make_info.get("PKG_FILES", "").split()
+
+ def build(self):
+ self.make("build")
+
+ for pkg in reversed(self.packages):
+ packager = packages.Packager(self.pakfire, pkg, self)
+ packager()
+
+ def dist(self):
+ self.pkg.dist(self)
+
+ def shell(self, args=[]):
+ # XXX need to add linux32 or linux64 to the command line
+ # XXX need to set CFLAGS here
+ command = "chroot %s /usr/bin/chroot-shell %s" % \
+ (self.chrootPath(), " ".join(args))
+
+ for key, val in self.environ.items():
+ command = "%s=\"%s\" " % (key, val) + command
+
+ # Empty the environment
+ command = "env -i - %s" % command
+
+ logging.debug("Shell command: %s" % command)
+
+ try:
+ self._mountall()
+
+ shell = os.system(command)
+ return os.WEXITSTATUS(shell)
+
+ finally:
+ self._umountall()
--- /dev/null
+#!/usr/bin/python
+
+import argparse
+import os
+import sys
+
+import packages
+
+from pakfire import Pakfire
+
+from constants import *
+from i18n import _
+
+def ask_user(question):
+ """
+ Ask the user a question that can be answered with yes or no.
+
+ This function returns True for "yes" and False for "no".
+
+ If the software is running in a non-interactive shell, no question
+ is asked at all and the answer is always "yes".
+ """
+ if not sys.stdin.isatty() or not sys.stdout.isatty() or not sys.stderr.isatty():
+ return True
+
+ print _("%s [y/N]") % question,
+ ret = raw_input()
+
+ return ret in ("y", "Y")
+
+
+class Cli(object):
+ # XXX check if we are running as the root user
+
+ def __init__(self):
+ self.parser = argparse.ArgumentParser(
+ description = _("Pakfire command line interface."),
+ )
+
+ self.parse_common_arguments()
+
+ self.parser.add_argument("--instroot", metavar="PATH",
+ default="/tmp/pakfire",
+ help=_("The path where pakfire should operate in."))
+
+ # Add sub-commands.
+ self.sub_commands = self.parser.add_subparsers()
+
+ self.parse_command_install()
+ self.parse_command_info()
+ self.parse_command_search()
+ self.parse_command_update()
+
+ # Finally parse all arguments from the command line and save them.
+ self.args = self.parser.parse_args()
+
+ # Create instance of the wonderful pakfire :)
+ self.pakfire = Pakfire(
+ self.args.instroot,
+ configs = [self.args.config],
+ )
+
+ self.action2func = {
+ "install" : self.handle_install,
+ "update" : self.handle_update,
+ "info" : self.handle_info,
+ "search" : self.handle_search,
+ }
+
+ def parse_common_arguments(self):
+ self.parser.add_argument("-v", "--verbose", action="store_true",
+ help=_("Enable verbose output."))
+
+ self.parser.add_argument("-c", "--config", nargs="?",
+ help=_("Path to a configuration file to load."))
+
+ def parse_command_install(self):
+ # Implement the "install" command.
+ sub_install = self.sub_commands.add_parser("install",
+ help=_("Install one or more packages to the system."))
+ sub_install.add_argument("package", nargs="+",
+ help=_("Give name of at least one package to install."))
+ sub_install.add_argument("action", action="store_const", const="install")
+
+ def parse_command_update(self):
+ # Implement the "update" command.
+ sub_update = self.sub_commands.add_parser("update",
+ help=_("Update the whole system or one specific package."))
+ sub_update.add_argument("package", nargs="*",
+ help=_("Give a name of a package to update or leave emtpy for all."))
+ sub_update.add_argument("action", action="store_const", const="update")
+
+ def parse_command_info(self):
+ # Implement the "info" command.
+ sub_info = self.sub_commands.add_parser("info",
+ help=_("Print some information about the given package(s)."))
+ sub_info.add_argument("package", nargs="+",
+ help=_("Give at least the name of one package."))
+ sub_info.add_argument("action", action="store_const", const="info")
+
+ def parse_command_search(self):
+ # Implement the "search" command.
+ sub_search = self.sub_commands.add_parser("search",
+ help=_("Search for a given pattern."))
+ sub_search.add_argument("pattern",
+ help=_("A pattern to search for."))
+ sub_search.add_argument("action", action="store_const", const="search")
+
+ def run(self):
+ action = self.args.action
+
+ try:
+ func = self.action2func[action]
+ except KeyError:
+ raise Exception, "Unhandled action: %s" % action # XXX return a nicer error message
+
+ return func()
+
+ def handle_info(self):
+ for pattern in self.args.package:
+ pkgs = self.pakfire.repos.get_by_glob(pattern)
+
+ pkgs = packages.PackageListing(pkgs)
+
+ for pkg in pkgs:
+ print pkg.dump()
+
+ def handle_search(self):
+ pkgs = self.pakfire.repos.search(self.args.pattern)
+
+ pkgs = packages.PackageListing(pkgs)
+
+ for pkg in pkgs:
+ print pkg.dump(short=True)
+
+ def handle_update(self):
+ pass
+
+
+class CliBuilder(Cli):
+ def __init__(self):
+ self.parser = argparse.ArgumentParser(
+ description = _("Pakfire builder command line interface."),
+ )
+
+ self.parse_common_arguments()
+
+ # Add sub-commands.
+ self.sub_commands = self.parser.add_subparsers()
+
+ self.parse_command_build()
+ self.parse_command_dist()
+ self.parse_command_info()
+ self.parse_command_search()
+ self.parse_command_shell()
+ self.parse_command_update()
+
+ # Finally parse all arguments from the command line and save them.
+ self.args = self.parser.parse_args()
+
+ self.pakfire = Pakfire(
+ builder = True,
+ configs = [self.args.config],
+ )
+
+ self.action2func = {
+ "build" : self.handle_build,
+ "dist" : self.handle_dist,
+ "update" : self.handle_update,
+ "info" : self.handle_info,
+ "search" : self.handle_search,
+ "shell" : self.handle_shell,
+ }
+
+ def parse_command_update(self):
+ # Implement the "update" command.
+ sub_update = self.sub_commands.add_parser("update",
+ help=_("Update the package indexes."))
+ sub_update.add_argument("action", action="store_const", const="update")
+
+ def parse_command_build(self):
+ # Implement the "build" command.
+ sub_build = self.sub_commands.add_parser("build",
+ help=_("Build one or more packages."))
+ sub_build.add_argument("package", nargs=1,
+ help=_("Give name of at least one package to build."))
+ sub_build.add_argument("action", action="store_const", const="build")
+
+ sub_build.add_argument("-a", "--arch",
+ help=_("Build the package for the given architecture."))
+ sub_build.add_argument("--resultdir", nargs="?",
+ help=_("Path were the output files should be copied to."))
+
+ def parse_command_shell(self):
+ # Implement the "shell" command.
+ sub_shell = self.sub_commands.add_parser("shell",
+ help=_("Go into a shell."))
+ sub_shell.add_argument("package", nargs=1,
+ help=_("Give name of a package."))
+ sub_shell.add_argument("action", action="store_const", const="shell")
+
+ sub_shell.add_argument("-a", "--arch",
+ help=_("Emulated architecture in the shell."))
+
+ def parse_command_dist(self):
+ # Implement the "dist" command.
+ sub_dist = self.sub_commands.add_parser("dist",
+ help=_("Generate a source package."))
+ sub_dist.add_argument("package", nargs=1,
+ help=_("Give name of a package."))
+ sub_dist.add_argument("action", action="store_const", const="dist")
+
+ sub_dist.add_argument("--resultdir", nargs="?",
+ help=_("Path were the output files should be copied to."))
+
+ def handle_build(self):
+ print self.args
+ # Get the package descriptor from the command line options
+ pkg = self.args.package[0]
+
+ # Check, if we got a regular file
+ if os.path.exists(pkg):
+ pkg = os.path.abspath(pkg)
+
+ if pkg.endswith(MAKEFILE_EXTENSION):
+ pkg = packages.Makefile(pkg)
+
+ elif pkg.endswith(PACKAGE_EXTENSION):
+ pkg = packages.SourcePackage(pkg)
+
+ else:
+ # XXX walk through the source tree and find a matching makefile
+ pass
+
+ self.pakfire.build(pkg, arch=self.args.arch, resultdir=self.args.resultdir)
+
+ def handle_shell(self):
+ print self.args
+ # Get the package descriptor from the command line options
+ pkg = self.args.package[0]
+
+ # Check, if we got a regular file
+ if os.path.exists(pkg):
+ pkg = os.path.abspath(pkg)
+
+ if pkg.endswith(MAKEFILE_EXTENSION):
+ pkg = packages.Makefile(pkg)
+
+ elif pkg.endswith(PACKAGE_EXTENSION):
+ pkg = packages.SourcePackage(pkg)
+
+ else:
+ # XXX walk through the source tree and find a matching makefile
+ pass
+
+ self.pakfire.shell(pkg, arch=self.args.arch)
+
+ def handle_dist(self):
+ print self.args
+ # Get the package descriptor from the command line options
+ pkg = self.args.package[0]
+
+ # Check, if we got a regular file
+ if os.path.exists(pkg):
+ pkg = os.path.abspath(pkg)
+
+ if pkg.endswith(MAKEFILE_EXTENSION):
+ pkg = packages.Makefile(pkg)
+
+ else:
+ # XXX walk through the source tree and find a matching makefile
+ pass
+
+ self.pakfire.dist(pkg, self.args.resultdir)
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import os
+
+from ConfigParser import ConfigParser
+
+import base
+
+from constants import *
+
+class Config(object):
+ def __init__(self, pakfire):
+ self.pakfire = pakfire
+
+ self._config = {
+ "debug" : True,
+ "logfile" : "/var/log/pakfire.log",
+ "source_download_url" : SOURCE_DOWNLOAD_URL,
+ }
+
+ self._config_repos = {}
+ self._distro = {}
+ self._files = []
+
+ # Read default configuration files
+ for file in self.config_files:
+ self.read(file)
+
+ def dump(self):
+ logging.debug("Configuration:")
+ for k, v in self._config.items():
+ logging.debug(" %s : %s" % (k, v))
+
+ logging.debug("Loaded from files:")
+ for f in self._files:
+ logging.debug(" %s" % f)
+
+ def read(self, filename):
+ # If no filename was given or the file does not exist we return silently
+ if not filename or not os.path.exists(filename):
+ return
+
+ filename = os.path.abspath(filename)
+
+ # If the file was already loaded, we return silently, too
+ if filename in self._files:
+ return
+
+ logging.debug("Reading configuration file: %s" % filename)
+
+ config = ConfigParser()
+ config.read(filename)
+
+ # Read the main section from the file if any
+ if "main" in config.sections():
+ for k,v in config.items("main"):
+ self._config[k] = v
+ config.remove_section("main")
+
+ # Read distribution information from the file
+ if "distro" in config.sections():
+ for k,v in config.items("distro"):
+ self._distro[k] = v
+ config.remove_section("distro")
+
+ # Read repository definitions
+ for section in config.sections():
+ if not self._config_repos.has_key(section):
+ self._config_repos[section] = {}
+
+ options = {}
+ for option in config.options(section):
+ options[option] = config.get(section, option)
+
+ self._config_repos[section].update(options)
+
+ self._files.append(filename)
+
+ def get(self, key, default=None):
+ return self._config.get(key, default)
+
+ def set(self, key, val):
+ self._config[key] = val
+
+ def get_repos(self):
+ return self._config_repos.items()
+
+ @property
+ def config_files(self):
+ files = []
+
+ if self.pakfire.builder:
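+ # In builder mode, walk up from the current working directory and
+ # look for a "config" directory that provides pakfire.conf and
+ # default.conf.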
+ path = os.getcwd()
+
+ while not path == "/":
+ _path = os.path.join(path, "config")
+ if os.path.exists(_path):
+ break
+
+ _path = None
+ path = os.path.dirname(path)
+
+ if _path:
+ files.append(os.path.join(_path, "pakfire.conf"))
+ files.append(os.path.join(_path, "default.conf"))
+
+ # Remove non-existent files
+ files = [f for f in files if os.path.exists(f)]
+
+ if not files:
+ # Return system configuration files
+ files += [CONFIG_FILE]
+ files += [os.path.join(CONFIG_DIR, f) for f in os.listdir(CONFIG_DIR)]
+
+ return files
+
--- /dev/null
+#!/usr/bin/python
+
+import os.path
+
+SYSCONFDIR = os.path.join(os.path.dirname(__file__), "..", "examples")
+
+CONFIG_DIR = os.path.join(SYSCONFDIR, "pakfire.repos.d")
+CONFIG_FILE = os.path.join(SYSCONFDIR, "pakfire.conf")
+
+CACHE_DIR = "/var/cache/pakfire"
+
+PACKAGES_DB = "var/lib/pakfire/packages.db"
+
+BUFFER_SIZE = 1024**2
+
+PACKAGE_FORMAT = 0
+PACKAGE_EXTENSION = "pfm"
+MAKEFILE_EXTENSION = "nm"
+
+PACKAGE_FILENAME_FMT = "%(name)s-%(version)s-%(release)s.%(arch)s.%(ext)s"
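+# e.g. "foo-1.0-2.i686.pfm" for a binary package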
+
+BUILD_PACKAGES = ["build-essentials",]
+SHELL_PACKAGES = ["less", "vim",]
+BUILD_ROOT = "/var/lib/pakfire/build"
+
+SOURCE_DOWNLOAD_URL = "http://source.ipfire.org/source-3.x/"
+SOURCE_CACHE_DIR = os.path.join(CACHE_DIR, "sources")
+
+SOURCE_PACKAGE_META = """\
+
+PKG_NAME="%(PKG_NAME)s"
+
+"""
+
+BINARY_PACKAGE_META = """\
+### %(name)s package
+
+VERSION="%(package_format)s"
+
+# Build information
+BUILD_DATE="XXX"
+BUILD_HOST="XXX"
+BUILD_ID="XXX"
+
+# Distribution information
+DISTRO_NAME="%(distro_name)s"
+DISTRO_RELEASE="%(distro_release)s"
+DISTRO_VENDOR="%(distro_vendor)s"
+
+# Package information
+PKG_NAME="%(name)s"
+PKG_VER="%(version)s"
+PKG_REL="%(release)s"
+PKG_EPOCH="%(epoch)s"
+
+PKG_GROUP="%(group)s"
+PKG_ARCH="%(arch)s"
+
+PKG_MAINTAINER="%(maintainer)s"
+PKG_LICENSE="%(license)s"
+PKG_URL="%(url)s"
+
+PKG_SUMMARY="%(summary)s"
+PKG_DESCRIPTION="%(description)s"
+
+# Dependency info
+PKG_DEPS="%(requires)s"
+PKG_PROVIDES="%(provides)s"
+
+PKG_PAYLOAD_COMP="XXX"
+PKG_PAYLOAD_SIZE="107869"
+
+"""
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import os
+import sqlite3
+
+import packages
+
+class Database(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self._db = None
+
+ self.open()
+
+ def __del__(self):
+ if self._db:
+ self._db.commit()
+ self._db.close()
+
+ def create(self):
+ pass
+
+ def open(self):
+ if not self._db:
+ logging.debug("Open database %s" % self.filename)
+
+ dirname = os.path.dirname(self.filename)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ database_exists = os.path.exists(self.filename)
+
+ # Make a connection to the database.
+ self._db = sqlite3.connect(self.filename)
+ self._db.row_factory = sqlite3.Row
+
+ # Create the database if it was not there, yet.
+ if not database_exists:
+ self.create()
+
+ def close(self):
+ self._db.close()
+
+ def commit(self):
+ self._db.commit()
+
+ def cursor(self):
+ return self._db.cursor()
+
+
+class LocalPackageDatabase(Database):
+ def create(self):
+ c = self.cursor()
+
+ c.executescript("""
+ CREATE TABLE files(
+ name TEXT,
+ pkg INTEGER,
+ size INTEGER,
+ type INTEGER,
+ hash1 TEXT,
+ installed INTEGER,
+ changed INTEGER
+ );
+
+ CREATE TABLE packages(
+ id INTEGER PRIMARY KEY,
+ name TEXT,
+ epoch INTEGER,
+ version TEXT,
+ release TEXT,
+ installed INTEGER,
+ reason TEXT,
+ repository TEXT,
+ hash1 TEXT,
+ provides TEXT,
+ requires TEXT,
+ conflicts TEXT,
+ obsoletes TEXT,
+ license TEXT,
+ summary TEXT,
+ description TEXT,
+ build_id TEXT,
+ build_host TEXT,
+ build_date INT
+ );
+ """)
+ # XXX add some indexes here
+
+ self.commit()
+ c.close()
+
+ def list_packages(self):
+ c = self.cursor()
+ c.execute("SELECT DISTINCT name FROM packages ORDER BY name")
+
+ for pkg in c:
+ yield pkg["name"]
+
+ c.close()
+
+ def add_package(self, pkg, installed=True):
+ c = self.cursor()
+
+ c.execute("INSERT INTO packages(name, epoch, version, release, installed, \
+ provides, requires, build_id, build_host, build_date) \
+ VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (
+ pkg.name,
+ pkg.epoch,
+ pkg.version,
+ pkg.release,
+ int(installed),
+ " ".join(pkg.provides),
+ " ".join(pkg.requires),
+ pkg.build_id,
+ pkg.build_host,
+ pkg.build_date
+ ))
+
+ #c.close()
+
+ # Get the id from the package
+ #c = self.cursor()
+ #c.execute("SELECT * FROM packages WHERE build_id = ? LIMIT 1", (pkg.build_id))
+ c.execute("SELECT * FROM packages WHERE name = ? AND version = ? AND \
+ release = ? AND epoch = ? LIMIT 1", (pkg.name, pkg.version, pkg.release, pkg.epoch))
+
+ ret = None
+ for pkg in c:
+ ret = packages.InstalledPackage(self, pkg)
+ break
+
+ assert ret
+ c.close()
+
+ return ret
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import re
+
+import packages
+import repository
+
+
+class Requires(object):
+ def __init__(self, pkg, requires):
+ self.pkg = pkg
+ self.requires = requires
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.requires)
+
+ def __str__(self):
+ return self.requires
+
+ def __cmp__(self, other):
+ if isinstance(other, Provides):
+ return cmp(self.requires, other.provides)
+
+ return cmp(self.requires, other.requires)
+
+ @property
+ def type(self):
+ if self.requires.startswith("/"):
+ return "file"
+
+ elif re.match(r"^lib.*\.so.*", self.requires):
+ return "lib"
+
+ return "generic"
+
+
+class Conflicts(object):
+ def __init__(self, pkg, conflicts):
+ self.pkg = pkg
+ self.conflicts = conflicts
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.conflicts)
+
+ def __str__(self):
+ return self.conflicts
+
+
+class Provides(object):
+ def __init__(self, pkg, provides):
+ self.pkg = pkg
+ self.provides = provides
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.provides)
+
+ def __str__(self):
+ return self.provides
+
+
+class Obsoletes(object):
+ def __init__(self, pkg, obsoletes):
+ self.pkg = pkg
+ self.obsoletes = obsoletes
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.obsoletes)
+
+ def __str__(self):
+ return self.obsoletes
+
+
+class DependencySet(object):
+ def __init__(self, pakfire):
+ # Reference all repositories
+ self.repos = pakfire.repos #repository.Repositories()
+
+ # List of packages in this set
+ self.__packages = []
+
+ # Helper lists
+ self.__conflicts = []
+ self.__provides = []
+ self.__requires = []
+ self.__obsoletes = []
+
+ self.__unresolveable = []
+
+ # Read-in all packages from the database that have
+ # been installed previously and need to be taken into
+ # account when resolving dependencies.
+ for pkg in self.repos.local.packages:
+ self.add_package(pkg)
+
+ def add_requires(self, requires, pkg=None):
+ requires = Requires(pkg, requires)
+
+ if requires in self.__requires:
+ return
+
+ if requires in self.__unresolveable:
+ return
+
+ for pkg in self.__packages:
+ if pkg.does_provide(requires):
+ return
+
+ self.__requires.append(requires)
+
+ def add_provides(self, provides, pkg=None):
+ provides = Provides(pkg, provides)
+
+ if provides in self.__conflicts:
+ raise Exception, "Could not add provides"
+
+ while provides in self.__requires:
+ self.__requires.remove(provides)
+
+ self.__provides.append(provides)
+
+ def add_obsoletes(self, obsoletes, pkg=None):
+ obsoletes = Obsoletes(pkg, obsoletes)
+
+ self.__obsoletes.append(obsoletes)
+
+ def add_package(self, pkg):
+ #print pkg, sorted(self.__packages)
+ #assert not pkg in self.__packages
+ if pkg in self.__packages:
+ logging.debug("Trying to add package which is already in the dependency set: %s" % pkg)
+ return
+
+ self.__packages.append(pkg)
+
+ self.add_provides(pkg.name, pkg)
+ for prov in pkg.provides:
+ self.add_provides(prov, pkg)
+
+ for filename in pkg.filelist:
+ self.add_provides(filename, pkg)
+
+ for req in pkg.requires:
+ self.add_requires(req, pkg)
+
+ @property
+ def packages(self):
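+ # Only hand out the package list once all requirements have been
+ # resolved.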
+ if not self.__requires:
+ return self.__packages[:]
+
+ def resolve(self):
+ unresolveable_reqs = []
+
+ while self.__requires:
+ requires = self.__requires.pop(0)
+ logging.debug("Resolving requirement \"%s\"" % requires)
+
+ # Fetch all candidates from the repositories and save the
+ # best one
+ candidates = packages.PackageListing(self.repos.get_by_provides(requires))
+
+ if not candidates:
+ logging.debug(" Got no candidates for that")
+ unresolveable_reqs.append(requires)
+ continue
+
+ logging.debug(" Got candidates for that:")
+ for candidate in candidates:
+ logging.debug(" --> %s" % candidate)
+
+ best = candidates.get_most_recent()
+ if best:
+ self.add_package(best)
+
+ #print unresolveable_reqs
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import os
+import re
+
+from errors import ConfigError
+
+class Distribution(object):
+ def __init__(self, pakfire):
+ self.pakfire = pakfire
+
+ self._data = {
+ "arch" : self.host_arch,
+ "name" : "unknown",
+ "slogan" : "---",
+ "vendor" : "unknown",
+ "version" : "0.0",
+ }
+
+ if not self.pakfire.config._distro:
+ raise ConfigError, "No distribution data was provided in the configuration"
+
+ # Import settings from Config()
+ self._data.update(self.pakfire.config._distro)
+
+ # Dump all data
+ self.dump()
+
+ def dump(self):
+ logging.debug("Distribution configuration:")
+
+ attrs = ("name", "version", "release", "sname", "dist", "vendor", "machine",)
+
+ for attr in attrs:
+ logging.debug(" %s : %s" % (attr, getattr(self, attr)))
+
+ @property
+ def name(self):
+ return self._data.get("name")
+
+ @property
+ def version(self):
+ return self._data.get("version")
+
+ @property
+ def release(self):
+ m = re.match(r"^([0-9]+)\..*", self.version)
+
+ return m.group(1)
+
+ @property
+ def sname(self):
+ return self.name.strip().lower()
+
+ @property
+ def slogan(self):
+ return self._data.get("slogan")
+
+ @property
+ def vendor(self):
+ return self._data.get("vendor")
+
+ def get_arch(self):
+ return self._data.get("arch")
+
+ def set_arch(self, arch):
+ # XXX check if we are allowed to set this arch
+ self._data.set("arch", arch)
+
+ arch = property(get_arch, set_arch)
+
+ @property
+ def dist(self):
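+ # The dist tag is the first two letters of the short name plus the
+ # release, e.g. "ip3" for IPFire 3.x.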
+ return self.sname[:2] + self.release
+
+ @property
+ def machine(self):
+ return "%s-%s-linux-gnu" % (self.arch, self.vendor)
+
+ @property
+ def host_arch(self):
+ """
+ Return the architecture of the host we are running on.
+ """
+ return os.uname()[4]
+
+ @property
+ def supported_arches(self):
+ host_arches = {
+ "i686" : [ "i686", "x86_64", ],
+ "i586" : [ "i586", "i686", "x86_64", ],
+ "i486" : [ "i486", "i586", "i686", "x86_64", ],
+ }
+
+ for host, can_be_built in host_arches.items():
+ if self.host_arch in can_be_built:
+ yield host
+
+ def host_supports_arch(self, arch):
+ """
+ Check if this host can build for the target architecture "arch".
+ """
+ return arch in self.supported_arches
+
+ @property
+ def environ(self):
+ """
+ An attribute that adds some environment variables to the
+ chroot environment.
+ """
+ env = {
+ "DISTRO_NAME" : self.name,
+ "DISTRO_SNAME" : self.sname,
+ "DISTRO_VERSION" : self.version,
+ "DISTRO_RELEASE" : self.release,
+ "DISTRO_DISTTAG" : self.dist,
+ "DISTRO_ARCH" : self.arch,
+ "DISTRO_MACHINE" : self.machine,
+ "DISTRO_VENDOR" : self.vendor,
+ }
+
+ return env
+
+ @property
+ def info(self):
+ info = {}
+
+ for k, v in self.environ.items():
+ info[k.lower()] = v
+
+ return info
+
--- /dev/null
+#!/usr/bin/python
+
+class Error(Exception):
+ pass
+
+class BuildError(Error):
+ pass
+
+class BuildRootLocked(Error):
+ pass
+
+class ConfigError(Error):
+ pass
+
+class DownloadError(Error):
+ pass
+
--- /dev/null
+#!/usr/bin/python
+
+"""
+ The translation process of all strings is done in here.
+"""
+
+import gettext
+
+"""
+ A function that returns the same string.
+"""
+N_ = lambda x: x
+
+
+"""
+ A function that returns the translation of a string if available.
+
+ The language is taken from the system environment.
+"""
+# Enable this to have translation in the development environment.
+# gettext.bindtextdomain("pakfire", "build/mo")
+
+_ = lambda x: gettext.ldgettext("pakfire", x)
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import os
+
+import packages
+
+class Index(object):
+ def __init__(self, pakfire):
+ self.pakfire = pakfire
+
+ self.arch = self.pakfire.distro.arch # XXX ???
+
+ self._packages = []
+
+ def get_all(self):
+ for package in self.packages:
+ yield package
+
+ def get_all_by_name(self, name):
+ for package in self.packages:
+ if package.name == name:
+ yield package
+
+ def get_latest_by_name(self, name):
+ p = [p for p in self.get_all_by_name(name)]
+ if not p:
+ return
+
+ # Sort the list so that the latest version of the package
+ # ends up at the bottom.
+ p.sort()
+
+ # Return the last one.
+ return p[-1]
+
+ @property
+ def packages(self):
+ for pkg in self._packages:
+ yield pkg
+
+ @property
+ def package_names(self):
+ names = []
+ for name in [p.name for p in self.packages]:
+ if not name in names:
+ names.append(name)
+
+ return sorted(names)
+
+ def update(self, force=False):
+ raise NotImplementedError
+
+
+class DirectoryIndex(Index):
+ def __init__(self, pakfire, path):
+ self.path = path
+
+ Index.__init__(self, pakfire)
+
+ def update(self, force=False):
+ logging.debug("Updating repository index '%s' (force=%s)" % (self.path, force))
+
+ for file in os.listdir(self.path):
+ file = os.path.join(self.path, file)
+
+ package = packages.BinaryPackage(file)
+
+ if not package.arch in (self.arch, "noarch"):
+ logging.warning("Skipped package with wrong architecture: %s (%s)" \
+ % (package.filename, package.arch))
+ continue
+
+ self._packages.append(package)
+
+
+class InstalledIndex(Index):
+ def __init__(self, pakfire, db):
+ self.db = db
+
+ Index.__init__(self, pakfire)
+
+ def get_all_by_name(self, name):
+ c = self.db.cursor()
+ c.execute("SELECT * FROM packages WHERE name = ?", name)
+
+ for pkg in c:
+ yield packages.InstalledPackage(self.db, pkg)
+
+ c.close()
+
+ @property
+ def package_names(self):
+ c = self.db.cursor()
+ c.execute("SELECT DISTINCT name FROM packages ORDER BY name")
+
+ for pkg in c:
+ yield pkg["name"]
+
+ c.close()
+
+ @property
+ def packages(self):
+ c = self.db.cursor()
+ c.execute("SELECT * FROM packages")
+
+ for pkg in c:
+ yield packages.InstalledPackage(self.db, pkg)
+
+ c.close()
+
+
+
+
+if __name__ == "__main__":
+ di = DirectoryIndex("/ipfire-3.x/build/packages/i686", "i686")
+
+ for package in di.packages:
+ print package
+
+ print di.package_names
+ print di.get_latest_by_name("ccache")
+ print [p for p in di.get_all_by_name("ccache")]
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+
+def setup_logging(config):
+ """
+ This function initializes the logger, which is enabled immediately.
+ """
+
+ l = logging.getLogger()
+
+ if len(l.handlers) > 1:
+ logging.debug("Logging was already set up. Don't do this again.")
+ return
+
+ # Remove all previous defined handlers
+ for handler in l.handlers:
+ l.removeHandler(handler)
+
+ if config.get("debug"):
+ l.setLevel(logging.DEBUG)
+
+ handler = logging.StreamHandler()
+ l.addHandler(handler)
+
+ handler = logging.FileHandler(config.get("logfile"))
+ handler.setLevel(logging.DEBUG)
+ l.addHandler(handler)
--- /dev/null
+#!/usr/bin/python
+
+from binary import BinaryPackage
+from installed import InstalledPackage
+from source import SourcePackage
+from virtual import VirtualPackage
+
+from listing import PackageListing
+from make import Makefile
+from packager import Packager
--- /dev/null
+#!/usr/bin/python
+
+# XXX Maybe we could do this with something like
+# libarchive to make it really fast.
+
+import grp
+import os
+import pwd
+import stat
+import time
+
+def ftype(mode):
+ if stat.S_ISBLK(mode):
+ return "b"
+ elif stat.S_ISCHR(mode):
+ return "c"
+ elif stat.S_ISDIR(mode):
+ return "d"
+ elif stat.S_ISREG(mode):
+ return "-"
+ elif stat.S_ISFIFO(mode):
+ return "p"
+ elif stat.S_ISLINK(mode):
+ return "l"
+ elif stat.S_ISSOCK(mode):
+ return "s"
+ return "?"
+
+def rwx(mode):
+ ret = ""
+ if mode & stat.S_IRUSR:
+ ret += "r"
+ else:
+ ret += "-"
+
+ if mode & stat.S_IWUSR:
+ ret += "w"
+ else:
+ ret += "-"
+
+ if mode & stat.S_IXUSR:
+ ret += "x"
+ else:
+ ret += "-"
+
+ return ret
+
+def fmode(mode):
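+ # Shift the group and other permission bits up into the "user"
+ # positions so that rwx() can test them with S_IRUSR/S_IWUSR/S_IXUSR.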
+ ret = ftype(mode)
+ ret += rwx((mode & 0700) << 0)
+ ret += rwx((mode & 0070) << 3)
+ ret += rwx((mode & 0007) << 6)
+ return ret
+
+class CpioError(Exception):
+ pass
+
+
+class CpioEntry(object):
+ def __init__(self, hdr, archive, offset):
+ self.archive = archive
+ self.hdr = hdr
+
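+ # A "newc" cpio header is 110 ASCII bytes of fixed-width hex fields,
+ # immediately followed by the file name. The file data starts after
+ # the name, padded to the next 4-byte boundary.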
+ self.offset = offset + 110 + self.namesize
+ self.offset += (4 - (self.offset % 4)) % 4
+ self.current = 0
+
+ self.closed = False
+
+ if len(self.hdr) < 110:
+ raise CpioError("Header too short.")
+
+ if not self.hdr.startswith("070701") and not self.hdr.startswith("070702"):
+ raise CpioError("Invalid header: %s" % self.hdr[:6])
+
+ def close(self):
+ self.closed = True
+
+ def flush(self):
+ pass # noop
+
+ def read(self, size=None):
+ """Read data from the entry.
+
+ Keyword arguments:
+ size -- Number of bytes to read (default: whole entry)
+ """
+ if self.closed:
+ raise ValueError("Read operation on closed file.")
+
+ self.archive.file.seek(self.offset + self.current, os.SEEK_SET)
+
+ if size and size < self.size - self.current:
+ ret = self.archive.file.read(size)
+ else:
+ ret = self.archive.file.read(self.size - self.current)
+ self.current += len(ret)
+ return ret
+
+ def seek(self, offset, whence=0):
+ """Move to new position within an entry.
+
+ Keyword arguments:
+ offset -- Byte count
+ whence -- Describes how offset is used.
+ 0: From beginning of file
+ 1: Forwards from current position
+ 2: Backwards from current position
+ Other values are ignored.
+ """
+ if self.closed:
+ raise ValueError("Seek operation on closed file.")
+
+ if whence == os.SEEK_SET:
+ self.current = offset
+ elif whence == os.SEEK_CUR:
+ self.current += offset
+ elif whence == os.SEEK_END:
+ self.current -= offset
+
+ self.current = min(max(0, self.current), self.size)
+
+ def tell(self):
+ """Get current position within an entry"""
+ if self.closed:
+ raise ValueError("Tell operation on closed file.")
+ return self.current
+
+ def __repr__(self):
+ return "<CpioEntry %s 0x%s>" % (self.name, self.checksum,)
+
+ @property
+ def checksum(self):
+ return int(self.hdr[102:110], 16)
+
+ @property
+ def devmajor(self):
+ return int(self.hdr[62:70], 16)
+
+ @property
+ def devminor(self):
+ return int(self.hdr[70:78], 16)
+
+ @property
+ def gid(self):
+ return int(self.hdr[30:38], 16)
+
+ @property
+ def inode(self):
+ return int(self.hdr[6:14], 16)
+
+ @property
+ def mode(self):
+ return int(self.hdr[14:22], 16)
+
+ @property
+ def mtime(self):
+ return int(self.hdr[46:54], 16)
+
+ @property
+ def name(self):
+ end = 110 + self.namesize - 1
+ return self.hdr[110:end]
+
+ @property
+ def namesize(self):
+ return int(self.hdr[94:102], 16)
+
+ @property
+ def nlinks(self):
+ return int(self.hdr[38:46], 16)
+
+ @property
+ def rdevmajor(self):
+ return int(self.hdr[78:86], 16)
+
+ @property
+ def rdevminor(self):
+ return int(self.hdr[86:94], 16)
+
+ @property
+ def size(self):
+ return int(self.hdr[54:62], 16)
+
+ @property
+ def uid(self):
+ return int(self.hdr[22:30], 16)
+
+
+class CpioArchive(object):
+ def __init__(self, filename):
+
+ self.filename = filename
+ self.file = open(self.filename, "r")
+ self.__readfile()
+
+ self.closed = False
+
+ def close(self):
+ if self.closed:
+ return
+ self.closed = True
+
+ self.file.close()
+
+ def __readfile(self):
+ if not self.file:
+ raise CpioError("File was not yet opened.")
+
+ self._entries = []
+ sposition = self.file.tell()
+ hdr = self.file.read(110)
+ while hdr:
+ namelen = int(hdr[94:102], 16) # Length of the name
+ hdr += self.file.read(namelen)
+ ce = CpioEntry(hdr, self, sposition)
+ if ce.name == "TRAILER!!!":
+ return
+ self._entries.append(ce)
+
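+ # Skip the padding after the file name, then the file data and its
+ # padding, to position at the next header.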
+ self.file.seek((4 - (self.file.tell()-sposition) % 4) % 4, os.SEEK_CUR)
+ self.file.seek(ce.size, os.SEEK_CUR)
+ self.file.seek((4 - (self.file.tell()-sposition) % 4) % 4, os.SEEK_CUR)
+
+ sposition = self.file.tell()
+ hdr = self.file.read(110)
+ else:
+ raise CpioError("Premature end of headers.")
+
+ @property
+ def entries(self):
+ return sorted(self._entries)
+
+ @property
+ def size(self):
+ return os.path.getsize(self.filename)
+
+ def ls(self):
+ for x in self.entries:
+ print x.name
+
+ def ll(self):
+ for x in self.entries:
+ print "%s %s %s %s %9d %s %s" % \
+ (fmode(x.mode),
+ x.nlinks,
+ pwd.getpwuid(x.uid)[0],
+ grp.getgrgid(x.gid)[0],
+ x.size,
+ time.strftime("%Y-%m-%d %H:%M", time.localtime(x.mtime)),
+ x.name,)
+
+ def get(self, item):
+ for x in self._entries:
+ if x.name == item:
+ return x
+ raise KeyError("No such file or directory.")
+
+ def __getitem__(self, item):
+ x = self.get(item)
+ x.seek(0)
+ return x.read()
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import re
+
+import _io_ as io
+import util
+
+from pakfire.i18n import _
+
+class Package(object):
+ type = None # either "bin", "src" or "virt"
+
+ def __init__(self, filename):
+ self.filename = filename
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.filename)
+
+ def __cmp__(self, other):
+ # If the package names differ, sort them in alphabetical order.
+ if not self.name == other.name:
+ return cmp(self.name, other.name)
+
+ ret = util.version_compare((self.epoch, self.version, self.release),
+ (other.epoch, other.version, other.release))
+
+ #if ret == 0:
+ # logging.debug("%s is equal to %s" % (self, other))
+ #elif ret < 0:
+ # logging.debug("%s is more recent than %s" % (other, self))
+ #elif ret > 0:
+ # logging.debug("%s is more recent than %s" % (self, other))
+
+ return ret
+
+ def dump(self, short=False):
+ if short:
+ return "%s.%s : %s" % (self.name, self.arch, self.summary)
+
+ items = [
+ (_("Name"), self.name),
+ (_("Arch"), self.arch),
+ (_("Version"), self.version),
+ (_("Release"), self.release),
+ (_("Size"), util.format_size(self.size)),
+# (_("Repo"), self.repo),
+ (_("Summary"), self.summary),
+# (_("URL"), self.url),
+ (_("License"), self.license),
+ ]
+
+ caption = _("Description")
+ for line in util.text_wrap(self.description):
+ items.append((caption, line))
+ caption = ""
+
+ format = "%%-%ds : %%s" % (max([len(k) for k, v in items]))
+
+ s = []
+ for caption, value in items:
+ s.append(format % (caption, value))
+
+ s.append("") # New line at the end
+
+ return "\n".join(s)
+
+ @property
+ def data(self):
+ """
+ Link to the datafile that only gets established if
+ we need access to it.
+ """
+ if not hasattr(self, "_data"):
+ self._data = io.CpioArchive(self.filename)
+
+ return self._data
+
+ @property
+ def info(self):
+ info = {
+ "name" : self.name,
+ "version" : self.version,
+ "release" : self.release,
+ "epoch" : self.epoch,
+ "arch" : self.arch,
+ "group" : self.group,
+ "summary" : self.summary,
+ "description" : self.description,
+ "maintainer" : self.maintainer,
+ "url" : self.url,
+ "license" : self.license,
+ }
+
+ return info
+
+ @property
+ def size(self):
+ """
+ Return the size of the package file.
+ """
+ return self.data.size
+
+ def get_file(self, filename):
+ """
+ Get a file descriptor for the file.
+
+ Raises KeyError if file is not available.
+ """
+ return self.data.get(filename)
+
+ ### META INFORMATION ###
+
+ @property
+ def metadata(self):
+ if not hasattr(self, "_metadata"):
+ info = self.get_file("info")
+ _metadata = {}
+
+ for line in info.read().splitlines():
+ m = re.match(r"^(\w+)=(.*)$", line)
+ if m is None:
+ continue
+
+ key, val = m.groups()
+ _metadata[key] = val.strip("\"")
+
+ self._metadata = _metadata
+
+ return self._metadata
+
+ @property
+ def friendly_name(self):
+ return "%s-%s" % (self.name, self.friendly_version)
+
+ @property
+ def friendly_version(self):
+ s = "%s-%s" % (self.version, self.release)
+
+ if self.epoch:
+ s = "%d:%s" % (self.epoch, s)
+
+ return s
+
+ @property
+ def repo(self):
+ return "XXX"
+
+ @property
+ def name(self):
+ return self.metadata.get("PKG_NAME")
+
+ @property
+ def version(self):
+ return self.metadata.get("PKG_VER")
+
+ @property
+ def release(self):
+ ret = None
+
+ for i in ("PKG_RELEASE", "PKG_REL"):
+ ret = self.metadata.get(i, None)
+ if ret:
+ break
+
+ return ret
+
+ @property
+ def epoch(self):
+ epoch = self.metadata.get("PKG_EPOCH", 0)
+
+ return int(epoch)
+
+ @property
+ def arch(self):
+ raise NotImplementedError
+
+ @property
+ def maintainer(self):
+ return self.metadata.get("PKG_MAINTAINER")
+
+ @property
+ def license(self):
+ return self.metadata.get("PKG_LICENSE")
+
+ @property
+ def summary(self):
+ return self.metadata.get("PKG_SUMMARY")
+
+ @property
+ def description(self):
+ return self.metadata.get("PKG_DESCRIPTION")
+
+ @property
+ def group(self):
+ return self.metadata.get("PKG_GROUP")
+
+ @property
+ def url(self):
+ return self.metadata.get("PKG_URL")
+
+ @property
+ def signature(self):
+ f = self.get_file("signature")
+ f.seek(0)
+ sig = f.read()
+ f.close()
+
+ return sig or None
+
+ @property
+ def build_date(self):
+ return self.metadata.get("BUILD_DATE")
+
+ @property
+ def build_host(self):
+ return self.metadata.get("BUILD_HOST")
+
+ @property
+ def build_id(self):
+ return self.metadata.get("BUILD_ID")
+
+ ### methods ###
+
+ def does_provide(self, requires):
+ # If the provides string equals the name of the package, we
+ # return true.
+ if self.name == requires.requires:
+ return True
+
+ if requires.type == "file":
+ return requires.requires in self.filelist
+
+ # Get all provide strings from the package data
+ # and return true if requires is matched. Otherwise return false.
+ provides = self.provides
+
+ return requires.requires in provides
+
+ # XXX this function has to do lots more of magic:
+ # e.g. filename matches, etc.
+
+ def extract(self, path):
+ raise NotImplementedError
+
--- /dev/null
+#!/usr/bin/python
+
+import sys
+
+import packager
+
+from base import Package
+
+class BinaryPackage(Package):
+ type = "bin"
+
+ @property
+ def arch(self):
+ return self.metadata.get("PKG_ARCH")
+
+ def extract(self, path):
+ pass
+
+ @property
+ def requires(self):
+ ret = ""
+
+ for i in ("PKG_REQUIRES", "PKG_DEPS"):
+ ret = self.metadata.get(i, ret)
+ if ret:
+ break
+
+ return ret.split()
+
+ @property
+ def provides(self):
+ return self.metadata.get("PKG_PROVIDES").split()
+
+ @property
+ def filelist(self):
+ # XXX this needs to be very fast
+ # and is totally broken ATM
+ f = self.get_file("filelist")
+ f.seek(0)
+
+ return f.read().split()
+
+ def get_extractor(self, pakfire):
+ return packager.Extractor(pakfire, self)
+
+
+if __name__ == "__main__":
+ for pkg in sys.argv[1:]:
+ pkg = BinaryPackage(pkg)
+
+ fmt = "%-10s : %s"
+
+ items = (
+ ("Name", pkg.name),
+ ("Version", pkg.version),
+ ("Release", pkg.release),
+ ("Epoch", pkg.epoch),
+ ("Size", pkg.size),
+ ("Arch", pkg.arch),
+ ("Signature", pkg.signature),
+ )
+
+ for item in items:
+ print fmt % item
+
+ print fmt % ("Filelist", "")
+ print "\n".join([" %s" % f for f in pkg.filelist])
+
+ print pkg.filelist
+
+ print
+
--- /dev/null
+#!/usr/bin/python
+
+import hashlib
+import os
+import time
+
+import util
+
+from base import Package
+
+
+# XXX maybe this gets renamed to "DatabasePackage" or something similar.
+
+class InstalledPackage(Package):
+ type = "installed"
+
+ def __init__(self, db, data):
+ self.db = db
+
+ self._data = {}
+
+ for key in data.keys():
+ self._data[key] = data[key]
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.friendly_name)
+
+ @property
+ def metadata(self):
+ return self._data
+
+ @property
+ def id(self):
+ id = self.metadata.get("id")
+ if not id:
+ id = 0
+
+ return id
+
+ @property
+ def name(self):
+ return self.metadata.get("name")
+
+ @property
+ def version(self):
+ return self.metadata.get("version")
+
+ @property
+ def release(self):
+ return self.metadata.get("release")
+
+ @property
+ def epoch(self):
+ epoch = self.metadata.get("epoch", 0)
+
+ return int(epoch)
+
+ @property
+ def arch(self):
+ return self.metadata.get("arch")
+
+ @property
+ def maintainer(self):
+ return self.metadata.get("maintainer")
+
+ @property
+ def license(self):
+ return self.metadata.get("license")
+
+ @property
+ def summary(self):
+ return self.metadata.get("summary")
+
+ @property
+ def description(self):
+ return self.metadata.get("description")
+
+ @property
+ def group(self):
+ return self.metadata.get("group")
+
+ @property
+ def build_date(self):
+ return self.metadata.get("build_date")
+
+ @property
+ def build_host(self):
+ return self.metadata.get("build_host")
+
+ @property
+ def build_id(self):
+ return self.metadata.get("build_id")
+
+ @property
+ def provides(self):
+ provides = self.metadata.get("provides")
+
+ if provides:
+ return provides.split()
+
+ return []
+
+ @property
+ def requires(self):
+ requires = self.metadata.get("requires")
+
+ if requires:
+ return requires.split()
+
+ return []
+
+ @property
+ def conflicts(self):
+ conflicts = self.metadata.get("conflicts")
+
+ if conflicts:
+ return conflicts.split()
+
+ return []
+
+ @property
+ def filelist(self):
+ c = self.db.cursor()
+ c.execute("SELECT name FROM files WHERE pkg = '%s'" % self.id) # XXX?
+
+ for f in c:
+ yield f["name"]
+
+ c.close()
+
+ ## database methods
+
+ def set_installed(self, installed):
+ c = self.db.cursor()
+ c.execute("UPDATE packages SET installed = ? WHERE id = ?", (installed, self.id))
+ c.close()
+
+ def add_file(self, filename, type=None, size=None, hash1=None, **kwargs):
+ if not hash1:
+ hash1 = util.calc_hash1(filename)
+
+ if size is None:
+ size = os.path.getsize(filename)
+
+ c = self.db.cursor()
+ c.execute("INSERT INTO files(name, pkg, size, type, hash1, installed) \
+ VALUES(?, ?, ?, ?, ?, ?)",
+ (filename, self.id, size, type, hash1, time.time()))
+ c.close()
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+
+class PackageListing(object):
+ def __init__(self, packages=[]):
+ self.__packages = []
+
+ if packages:
+ for package in packages:
+ self.__packages.append(package)
+
+ self.__packages.sort()
+
+ def __repr__(self):
+ return "<PackageListing (%d) %s>" % (len(self.__packages),
+ [p.friendly_name for p in self.__packages])
+
+ def __iter__(self):
+ return iter(self.__packages)
+
+ def __len__(self):
+ return len(self.__packages)
+
+ def get_most_recent(self):
+ if self.__packages:
+ return self.__packages[-1]
+
--- /dev/null
+#!/usr/bin/python
+
+import os
+import tarfile
+
+from urlgrabber.grabber import URLGrabber, URLGrabError
+from urlgrabber.progress import TextMeter
+
+import packager
+
+from base import Package
+from source import SourcePackage
+from virtual import VirtualPackage
+from pakfire.errors import DownloadError
+from pakfire.constants import *
+
+class SourceDownloader(object):
+ def __init__(self, pakfire):
+ self.pakfire = pakfire
+
+ self.grabber = URLGrabber(
+ prefix = self.pakfire.config.get("source_download_url"),
+ progress_obj = TextMeter(),
+ quote = 0,
+ )
+
+ def download(self, filename):
+ filename = os.path.join(SOURCE_CACHE_DIR, filename)
+
+ if os.path.exists(filename):
+ return filename
+
+ dirname = os.path.dirname(filename)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ try:
+ self.grabber.urlgrab(os.path.basename(filename), filename=filename)
+ except URLGrabError, e:
+ raise DownloadError, "%s %s" % (os.path.basename(filename), e)
+
+ return filename
+
+
+class Makefile(Package):
+ @property
+ def files(self):
+ basedir = os.path.dirname(self.filename)
+
+ for dirs, subdirs, files in os.walk(basedir):
+ for f in files:
+ yield os.path.join(dirs, f)
+
+ def extract(self, env):
+ # Copy all files that belong to the package
+ for f in self.files:
+ _f = f[len(os.path.dirname(self.filename)):]
+ env.copyin(f, "/build/%s" % _f)
+
+ downloader = SourceDownloader(env.pakfire)
+ for filename in env.make_sources():
+ _filename = downloader.download(filename)
+
+ if _filename:
+ env.copyin(_filename, "/build/files/%s" % os.path.basename(_filename))
+
+ @property
+ def package_filename(self):
+ return PACKAGE_FILENAME_FMT % {
+ "arch" : self.arch,
+ "ext" : PACKAGE_EXTENSION,
+ "name" : self.name,
+ "release" : self.release,
+ "version" : self.version,
+ }
+
+ @property
+ def arch(self):
+ """
+ This is only used to create the name of the source package.
+ """
+ return "src"
+
+ def dist(self, env):
+ """
+ Create a source package in env.
+
+ We assume that all required files are in /build.
+ """
+ basedir = env.chrootPath("build")
+
+ files = {
+ "data.img" : env.chrootPath("tmp/data.img"),
+ "signature" : env.chrootPath("tmp/signature"),
+ "info" : env.chrootPath("tmp/info"),
+ }
+
+ # Package all files.
+ a = tarfile.open(files["data.img"], "w")
+ for dir, subdirs, _files in os.walk(basedir):
+ for file in _files:
+ file = os.path.join(dir, file)
+
+ a.add(file, arcname=file[len(basedir):])
+ a.close()
+
+ # XXX add compression for the sources
+
+ # Create an empty signature.
+ f = open(files["signature"], "w")
+ f.close()
+
+ pkg = VirtualPackage(env.make_info)
+
+ # Save meta information.
+ f = open(files["info"], "w")
+ f.write(SOURCE_PACKAGE_META % {
+ "PKG_NAME" : pkg.name,
+ })
+ f.close()
+
+ result = env.chrootPath("result", "src", pkg.filename)
+ resultdir = os.path.dirname(result)
+ if not os.path.exists(resultdir):
+ os.makedirs(resultdir)
+
+ f = tarfile.open(result, "w")
+ for arcname, name in files.items():
+ f.add(name, arcname=arcname, recursive=False)
+
+ f.close()
+
--- /dev/null
+#!/usr/bin/python
+
+import glob
+import logging
+import lzma
+import os
+import progressbar
+import sys
+import tarfile
+import tempfile
+import xattr
+
+from pakfire.constants import *
+from pakfire.i18n import _
+
+class Extractor(object):
+ def __init__(self, pakfire, pkg):
+ self.pakfire = pakfire
+ self.pkg = pkg
+
+ self.data = pkg.get_file("data.img")
+
+ self.archive = None
+ self._tempfile = None
+
+ self._uncompress_data()
+
+ def cleanup(self):
+ # XXX not called by anything
+ if self._tempfile:
+ os.unlink(self._tempfile)
+
+ def _uncompress_data(self):
+ # XXX this function uncompresses the data.img file
+ # and saves the bare tarball to /tmp which takes a lot
+ # of space.
+
+ self.data.seek(0)
+
+ # Create a temporary file to save the content in
+ fd, self._tempfile = tempfile.mkstemp()
+ f = os.fdopen(fd, "w")
+
+ decompressor = lzma.LZMADecompressor()
+
+ buf = self.data.read(BUFFER_SIZE)
+ while buf:
+ f.write(decompressor.decompress(buf))
+
+ buf = self.data.read(BUFFER_SIZE)
+
+ f.write(decompressor.flush())
+ f.close()
+
+ self.archive = tarfile.open(self._tempfile)
+
+ @property
+ def files(self):
+ return self.archive.getnames()
+
+ def extractall(self, path="/", callback=None):
+ pbar = self._make_progressbar()
+
+ if pbar:
+ pbar.start()
+ else:
+ print " %s %-20s" % (_("Extracting"), self.pkg.name)
+
+ i = 0
+ for name in self.files:
+ i += 1
+ self.extract(name, path, callback=callback)
+
+ if pbar:
+ pbar.update(i)
+
+ if pbar:
+ pbar.finish()
+ #sys.stdout.write("\n")
+
+ def extract(self, filename, path="/", callback=None):
+ member = self.archive.getmember(filename)
+ target = os.path.join(path, filename)
+
+ # If the member is a directory and if it already exists, we
+ # don't need to create it again.
+ if member.isdir() and os.path.exists(target):
+ return
+
+ if self.pakfire.config.get("debug"):
+ msg = "Creating file (%s:%03d:%03d) " % \
+ (tarfile.filemode(member.mode), member.uid, member.gid)
+ if member.issym():
+ msg += "/%s -> %s" % (member.name, member.linkname)
+ elif member.islnk():
+ msg += "/%s link to /%s" % (member.name, member.linkname)
+ else:
+ msg += "/%s" % member.name
+ logging.debug(msg)
+
+ # Remove the file if it already exists.
+ if not member.isdir() and os.path.exists(target):
+ os.unlink(target)
+
+ self.archive.extract(member, path=path)
+
+ # XXX implement setting of xattrs/acls here
+
+ if callback and not member.isdir():
+ callback(member.name, hash1="XXX", size=member.size)
+
+ def _make_progressbar(self):
+ # Don't display a progressbar if we are running in debug mode.
+ if self.pakfire.config.get("debug"):
+ return
+
+ if not sys.stdout.isatty():
+ return
+
+ widgets = [
+ " ",
+ "%s %-20s" % (_("Extracting:"), self.pkg.name),
+ " ",
+ progressbar.Bar(left="[", right="]"),
+ " ",
+# progressbar.Percentage(),
+# " ",
+ progressbar.ETA(),
+ " ",
+ ]
+
+ return progressbar.ProgressBar(
+ widgets=widgets,
+ maxval=len(self.files),
+ term_width=80,
+ )
+
+
+class InnerTarFile(tarfile.TarFile):
+ def __init__(self, *args, **kwargs):
+ # Force the pax format
+ kwargs["format"] = tarfile.PAX_FORMAT
+
+ if kwargs.has_key("env"):
+ self.env = kwargs.pop("env")
+
+ tarfile.TarFile.__init__(self, *args, **kwargs)
+
+ def __filter_xattrs(self, tarinfo):
+ logging.debug("Adding file: %s" % tarinfo.name)
+
+ filename = self.env.chrootPath(self.env.buildroot, tarinfo.name)
+ for attr, value in xattr.get_all(filename):
+ tarinfo.pax_headers[attr] = value
+
+ logging.debug(" xattr: %s=%s" % (attr, value))
+
+ return tarinfo
+
+ def add(self, *args, **kwargs):
+ # Add filter for xattrs if no other filter is set.
+ if not kwargs.has_key("filter") and len(args) < 5:
+ kwargs["filter"] = self.__filter_xattrs
+
+ tarfile.TarFile.add(self, *args, **kwargs)
+
+
+# XXX this is totally ugly and needs to be done right!
+
+class Packager(object):
+ ARCHIVE_FILES = ("info", "signature", "data.img")
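+ # The finished package is an outer tarball that contains exactly these
+ # three members (see __call__ below).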
+
+ def __init__(self, pakfire, pkg, env):
+ self.pakfire = pakfire
+ self.pkg = pkg
+ self.env = env
+
+ self.tarball = None
+
+ # Store meta information
+ self.info = {
+ "package_format" : PACKAGE_FORMAT,
+ }
+ self.info.update(self.pkg.info)
+ self.info.update(self.pakfire.distro.info)
+
+ ### Create temporary files
+ # Create a temp directory into which we extract all files again, so we
+ # can gather some information about them, like requirements and provides.
+ self.tempdir = self.env.chrootPath("tmp", "%s_data" % self.pkg.friendly_name)
+ if not os.path.exists(self.tempdir):
+ os.makedirs(self.tempdir)
+
+ # Create files that have the archive data
+ self.archive_files = {}
+ for i in self.ARCHIVE_FILES:
+ self.archive_files[i] = \
+ self.env.chrootPath("tmp", "%s_%s" % (self.pkg.friendly_name, i))
+
+ def __call__(self):
+ logging.debug("Packaging %s" % self.pkg.friendly_name)
+
+ # Create the tarball and add all data to it.
+ self.create_tarball()
+
+ chroot_tempdir = self.tempdir[len(self.env.chrootPath()):]
+ self.info.update({
+ "requires" : self.env.do("/usr/lib/buildsystem-tools/dependency-tracker requires %s" % chroot_tempdir,
+ returnOutput=True),
+ "provides" : self.env.do("/usr/lib/buildsystem-tools/dependency-tracker provides %s" % chroot_tempdir,
+ returnOutput=True),
+ })
+
+ self.create_info()
+ self.create_signature()
+
+ # Create the outer tarball.
+ resultdir = os.path.join(self.env.chrootPath("result", self.pkg.arch))
+ if not os.path.exists(resultdir):
+ os.makedirs(resultdir)
+
+ filename = os.path.join(resultdir, self.pkg.filename)
+
+ tar = tarfile.TarFile(filename, mode="w", format=tarfile.PAX_FORMAT)
+
+ for i in self.ARCHIVE_FILES:
+ tar.add(self.archive_files[i], arcname=i)
+
+ tar.close()
+
+ def create_tarball(self):
+ tar = InnerTarFile(self.archive_files["data.img"], mode="w", env=self.env)
+
+ files = []
+ for pattern in self.pkg.file_patterns:
+ if pattern.startswith("/"):
+ pattern = pattern[1:]
+ pattern = self.env.chrootPath(self.env.buildroot, pattern)
+
+ # Recognize the type of the pattern. Patterns could be a glob
+ # pattern that is expanded here or just a directory which will
+ # be included recursively.
+ if "*" in pattern or "?" in pattern:
+ files += glob.glob(pattern)
+
+ elif os.path.exists(pattern):
+ # Add directories recursively...
+ if os.path.isdir(pattern):
+ for dir, subdirs, _files in os.walk(pattern):
+ for file in _files:
+ file = os.path.join(dir, file)
+ files.append(file)
+
+ # all other files are just added.
+ else:
+ files.append(pattern)
+
+ else:
+ logging.warning("Unrecognized pattern type: %s" % pattern)
+
+ files.sort()
+
+ for file_real in files:
+ file_tar = file_real[len(self.env.chrootPath(self.env.buildroot)) + 1:]
+
+ tar.add(file_real, arcname=file_tar)
+ if not os.path.isdir(file_real):
+ os.unlink(file_real)
+
+ # Dump all files that are in the archive.
+ tar.list()
+
+ # Write all data to disk.
+ tar.close()
+
+ # Reopen the tarfile in read mode and extract all content to tempdir
+ tar = InnerTarFile(self.archive_files["data.img"])
+ tar.extractall(path=self.tempdir)
+ tar.close()
+
+ # XXX compress the tarball here
+
+ def create_info(self):
+ f = open(self.archive_files["info"], "w")
+ f.write(BINARY_PACKAGE_META % self.info)
+ f.close()
+
+ def create_signature(self):
+ # Create an empty signature.
+ f = open(self.archive_files["signature"], "w")
+ f.close()
--- /dev/null
+#!/usr/bin/python
+
+from base import Package
+
+class SourcePackage(Package):
+ type = "src"
+
+ @property
+ def arch(self):
+ return self.type
+
+ def extract(self, path):
+ pass
+
+ @property
+ def requires(self):
+ """
+ Return the requirements for the build.
+ """
+ return self.metadata.get("PKG_REQUIRES", "").split()
+
--- /dev/null
+#!/usr/bin/python
+
+from __future__ import division
+
+import hashlib
+
+from pakfire.constants import *
+
+def version_compare_epoch(e1, e2):
+ return cmp(e1, e2)
+
+def version_compare_version(v1, v2):
+ return cmp(v1, v2)
+
+def version_compare_release(r1, r2):
+ return cmp(r1, r2)
+
+def version_compare((e1, v1, r1), (e2, v2, r2)):
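+ # Compare (epoch, version, release) tuples component by component.
+ # Note that cmp() compares the components as plain strings, so for example
+ # version_compare(("0", "1.9", "1"), ("0", "1.10", "1")) is positive,
+ # because "1.9" sorts after "1.10" lexically.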
+
+ ret = version_compare_epoch(e1, e2)
+ if not ret == 0:
+ return ret
+
+ ret = version_compare_version(v1, v2)
+ if not ret == 0:
+ return ret
+
+ return version_compare_release(r1, r2)
+
+def text_wrap(s, length=65):
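+ # Greedy word wrapping: words are collected until a line reaches roughly
+ # `length` characters. For example, text_wrap("one two three four", length=10)
+ # returns ["one two three", "four"].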
+ t = []
+ s = s.split()
+
+ l = []
+ for word in s:
+ l.append(word)
+
+ if len(" ".join(l)) >= length:
+ t.append(l)
+ l = []
+
+ if l:
+ t.append(l)
+
+ return [" ".join(l) for l in t]
+
+def format_size(s):
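+ # Human-readable size, e.g. format_size(2048) == "2k" and
+ # format_size(3 * 1024 ** 3) == "3G".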
+ units = ("B", "k", "M", "G", "T")
+ unit = 0
+
+ while s >= 1024 and unit < len(units) - 1:
+ s /= 1024
+ unit += 1
+
+ return "%d%s" % (int(s), units[unit])
+
+def calc_hash1(filename):
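+ # Return the SHA-1 hex digest (40 hex characters) of the file contents,
+ # read in BUFFER_SIZE chunks.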
+ h = hashlib.sha1()
+
+ f = open(filename)
+ buf = f.read(BUFFER_SIZE)
+ while buf:
+ h.update(buf)
+ buf = f.read(BUFFER_SIZE)
+
+ f.close()
+ return h.hexdigest()
--- /dev/null
+#!/usr/bin/python
+
+from base import Package
+
+from pakfire.constants import *
+
+
+class VirtualPackage(Package):
+ type = "virt"
+
+ def __init__(self, data):
+ self._data = {}
+
+ for key in data.keys():
+ self._data[key] = data[key]
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.friendly_name)
+
+ @property
+ def metadata(self):
+ return self._data
+
+ @property
+ def filename(self):
+ return PACKAGE_FILENAME_FMT % {
+ "arch" : self.arch,
+ "ext" : PACKAGE_EXTENSION,
+ "name" : self.name,
+ "release" : self.release,
+ "version" : self.version,
+ }
+
+ @property
+ def arch(self):
+ return self.metadata.get("PKG_ARCH")
+
+ @property
+ def file_patterns(self):
+ return self.metadata.get("PKG_FILES").split()
--- /dev/null
+#!/usr/bin/python
+
+import logging
+
+class Plugins(object):
+ allowed_methods = ["init",]
+
+ def __init__(self, pakfire):
+ self.pakfire = pakfire
+
+ self.__plugins = []
+
+ def run(self, method):
+ if not method in self.allowed_methods:
+ raise Exception, "Method '%s' is not allowed to be called" % method
+
+ logging.debug("Running plugin method '%s'" % method)
+
--- /dev/null
+#!/usr/bin/python
+
+import fnmatch
+import logging
+import os
+
+from ConfigParser import ConfigParser
+
+from urlgrabber.grabber import URLGrabber
+from urlgrabber.mirror import MGRandomOrder
+from urlgrabber.progress import TextMultiFileMeter
+
+import base
+import database
+import index
+
+from constants import *
+
+class Repositories(object):
+ """
+ Class that loads all repositories from the configuration files.
+
+ This is the place where repositories can be activated or deactivated.
+ """
+
+ def __init__(self, pakfire):
+ self.pakfire = pakfire
+
+ self.config = pakfire.config
+ self.distro = pakfire.distro
+
+ # Place to store the repositories
+ self._repos = []
+
+ # Create the local repository
+ self.local = LocalRepository(self.pakfire)
+ self.add_repo(self.local)
+
+ for repo_name, repo_args in self.config.get_repos():
+ self._parse(repo_name, repo_args)
+
+ self.update_indexes()
+
+ def _parse(self, name, args):
+ # XXX still need to implement variable expansion here
+
+ _args = {
+ "name" : name,
+ "enabled" : True,
+ "gpgkey" : None,
+ }
+ _args.update(args)
+
+ repo = RemoteRepository(self.pakfire, **_args)
+
+ self.add_repo(repo)
+
+ def add_repo(self, repo):
+ self._repos.append(repo)
+ self._repos.sort()
+
+ @property
+ def enabled(self):
+ for repo in self._repos:
+ if not repo.enabled:
+ continue
+
+ yield repo
+
+ def update_indexes(self, force=False):
+ logging.debug("Updating all repository indexes (force=%s)" % force)
+
+ # XXX update all indexes if necessary or forced
+ for repo in self.enabled:
+ repo.update_index(force=force)
+
+ def get_all(self):
+ for repo in self.enabled:
+ for pkg in repo.get_all():
+ yield pkg
+
+ def get_by_name(self, name):
+ for repo in self.enabled:
+ for pkg in repo.get_by_name(name):
+ yield pkg
+
+ def get_by_glob(self, pattern):
+ for repo in self.enabled:
+ for pkg in repo.get_by_glob(pattern):
+ yield pkg
+
+ def get_by_provides(self, requires):
+ for repo in self.enabled:
+ for pkg in repo.get_by_provides(requires):
+ yield pkg
+
+ def search(self, pattern):
+ pkg_names = []
+
+ for repo in self.enabled:
+ for pkg in repo.search(pattern):
+ if pkg.name in pkg_names:
+ continue
+
+ pkg_names.append(pkg.name)
+ yield pkg
+
+
+class RepositoryFactory(object):
+ def __init__(self, pakfire, name, description):
+ self.pakfire = pakfire
+
+ self.name, self.description = name, description
+
+ # Add link to distro object
+ self.distro = pakfire.distro #distro.Distribution()
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.name)
+
+ def __cmp__(self, other):
+ return cmp(self.priority, other.priority) or cmp(self.name, other.name)
+
+ @property
+ def priority(self):
+ raise NotImplementedError
+
+ def update_index(self, force=False):
+ """
+ A function that is called to update the local data of
+ the repository.
+ """
+ pass
+
+ def get_all(self):
+ """
+ Simply returns an instance of every package in this repository.
+ """
+ for pkg in self.packages:
+ yield pkg
+
+ def get_by_name(self, name):
+ for pkg in self.packages:
+ if pkg.name == name:
+ yield pkg
+
+ def get_by_glob(self, pattern):
+ """
+ Returns a list of all packages whose names match the glob pattern
+ that is provided.
+ """
+ for pkg in self.packages:
+ if fnmatch.fnmatch(pkg.name, pattern):
+ yield pkg
+
+ def get_by_provides(self, requires):
+ """
+ Returns a list of all packages that offer a matching "provides"
+ of the given "requires".
+ """
+ for pkg in self.packages:
+ if pkg.does_provide(requires):
+ yield pkg
+
+ def search(self, pattern):
+ """
+ Returns all packages that match the given pattern, which can be a
+ part of the name, summary or description, or a glob pattern that
+ matches any of these.
+ """
+ for pkg in self.packages:
+ for item in (pkg.name, pkg.summary, pkg.description):
+ if pattern.lower() in item.lower() or \
+ fnmatch.fnmatch(item, pattern):
+ yield pkg
+
+ @property
+ def packages(self):
+ """
+ Returns all packages.
+ """
+ return self.index.packages
+
+
+class LocalRepository(RepositoryFactory):
+ enabled = True
+
+ def __init__(self, pakfire):
+ RepositoryFactory.__init__(self, pakfire, "installed", "Installed packages")
+
+ self.path = os.path.join(self.pakfire.path, PACKAGES_DB)
+
+ self.db = database.LocalPackageDatabase(self.path)
+
+ self.index = index.InstalledIndex(self.pakfire, self.db)
+
+ @property
+ def priority(self):
+ """
+ The local repository always has the highest priority.
+ """
+ return 0
+
+ # XXX need to implement better get_by_name
+
+
+
+class RemoteRepository(RepositoryFactory):
+ def __init__(self, pakfire, name, description, url, gpgkey, enabled):
+ RepositoryFactory.__init__(self, pakfire, name, description)
+
+ self.url, self.gpgkey = url, gpgkey
+
+ if enabled in (True, 1, "1", "yes", "y"):
+ self.enabled = True
+ else:
+ self.enabled = False
+
+ if self.url.startswith("file://"):
+ self.index = index.DirectoryIndex(self.pakfire, self.url[7:])
+
+ else:
+ self.index = None
+
+ logging.debug("Created new repository(name='%s', url='%s', enabled='%s')" % \
+ (self.name, self.url, self.enabled))
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.url)
+
+ @property
+ def priority(self):
+ priority = 100
+
+ url2priority = {
+ "file://" : 50,
+ "http://" : 75,
+ }
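+ # Lower values sort first (see __cmp__), so file:// repositories are
+ # preferred over http:// mirrors, and the installed-packages repository
+ # (priority 0) always comes first.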
+
+ for url, prio in url2priority.items():
+ if self.url.startswith(url):
+ priority = prio
+ break
+
+ return priority
+
+ @property
+ def mirrorlist(self):
+ # XXX
+ return [
+ "http://mirror0.ipfire.org/",
+ ]
+
+ def fetch_file(self, filename):
+ grabber = URLGrabber(
+ progress_obj = TextMultiFileMeter(),
+ )
+
+ mg = MGRandomOrder(grabber, self.mirrorlist)
+
+ # XXX Need to say destination here.
+ mg.urlgrab(filename)
+
+ def update_index(self, force=False):
+ if self.index:
+ self.index.update(force=force)
+
+ #def get_all(self, requires):
+ # for pkg in self.index.get_all():
+ # if pkg.does_provide(requires):
+ # yield pkg
+
--- /dev/null
+#!/usr/bin/python
+
+import logging
+
+import depsolve
+import util
+
+from i18n import _
+
+class ActionError(Exception):
+ pass
+
+
+class Action(object):
+ def __init__(self, pakfire, pkg, deps=None):
+ self.pakfire = pakfire
+ self.pkg = pkg
+ self.deps = deps or []
+
+ def __cmp__(self, other):
+ # XXX ugly
+ return cmp(self.__repr__(), other.__repr__())
+
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__, self.pkg.friendly_name)
+
+ def remove_dep(self, dep):
+ if not self.deps:
+ return
+
+ while dep in self.deps:
+ logging.debug("Removing dep %s from %s" % (dep, self))
+ self.deps.remove(dep)
+
+ def run(self):
+ raise NotImplementedError
+
+ @property
+ def local(self):
+ """
+ Reference to local repository (database).
+ """
+ return self.pakfire.repos.local
+
+
+class ActionExtract(Action):
+ def run(self):
+ logging.debug("Extracting package %s" % self.pkg.friendly_name)
+
+ # Create package in the database
+ virtpkg = self.local.db.add_package(self.pkg, installed=False)
+
+ # Grab an instance of the extractor and set it up
+ extractor = self.pkg.get_extractor(self.pakfire)
+
+ # Extract all files to instroot
+ extractor.extractall(self.pakfire.path, callback=virtpkg.add_file)
+
+ # Mark package as installed
+ virtpkg.set_installed(True)
+ #self.db.commit()
+
+ # Remove all temporary files
+ extractor.cleanup()
+
+
+class ActionScript(Action):
+ def run(self):
+ pass # XXX TBD
+
+
+class ActionScriptPreIn(ActionScript):
+ pass
+
+
+class ActionScriptPostIn(ActionScript):
+ pass
+
+
+class ActionScriptPreUn(ActionScript):
+ pass
+
+
+class ActionScriptPostUn(ActionScript):
+ pass
+
+
+class ActionInstall(Action):
+ pass
+
+
+class ActionRemove(Action):
+ pass
+
+
+class TransactionSet(object):
+ def __init__(self, pakfire, ds):
+ self.pakfire = pakfire
+ self.ds = ds
+
+ self._actions = []
+
+ self._installs = []
+ self._removes = []
+ self._updates = []
+
+ # Reference to local repository
+ self.local = pakfire.repos.local
+
+ self._packages = self.local.get_all()
+
+ self.populate()
+
+ def _install_pkg(self, pkg):
+ # XXX add dependencies for running the script here
+ action_prein = ActionScriptPreIn(self.pakfire, pkg)
+
+ action_extract = ActionExtract(self.pakfire, pkg, deps=[action_prein])
+
+ # XXX add dependencies for running the script here
+ action_postin = ActionScriptPostIn(self.pakfire, pkg, deps=[action_extract])
+
+ for action in (action_prein, action_extract, action_postin):
+ self.add_action(action)
+
+ self._installs.append(pkg)
+
+ def _update_pkg(self, pkg):
+ action_extract = ActionExtract(self.pakfire, pkg)
+
+ self.add_action(action_extract)
+ self._updates.append(pkg)
+
+ def _remove_pkg(self, pkg):
+ # XXX TBD
+ self._removes.append(pkg)
+
+ def populate(self):
+ # XXX need to check later, if this really works
+
+ # Determine which packages we have to add
+ # and which we have to remove.
+
+ for pkg in self.ds.packages:
+ pkgs = self.local.get_by_name(pkg.name)
+ pkgs = [p for p in pkgs]
+ if not pkgs:
+ # Got a new package to install
+ self._install_pkg(pkg)
+
+ else:
+ # Check for updates
+ for _pkg in pkgs:
+ if pkg > _pkg:
+ self._update_pkg(pkg)
+ break
+
+ for pkg in self._packages:
+ if not pkg in self.ds.packages:
+ self._remove_pkg(pkg)
+
+ def add_action(self, action):
+ logging.debug("New action added: %s" % action)
+
+ self._actions.append(action)
+
+ def remove_action(self, action):
+ logging.debug("Removing action: %s" % action)
+
+ self._actions.remove(action)
+ for _action in self.actions:
+ _action.remove_dep(action)
+
+ @property
+ def installs(self):
+ return sorted(self._installs)
+
+ @property
+ def updates(self):
+ return sorted(self._updates)
+
+ @property
+ def removes(self):
+ return sorted(self._removes)
+
+ @property
+ def actions(self):
+ for action in self._actions:
+ yield action
+
+ @property
+ def packages(self):
+ for action in self._actions:
+ yield action.pkg
+
+ def run_action(self, action):
+ try:
+ action.run()
+ except ActionError, e:
+ logging.error("Action finished with an error: %s - %s" % (action, e))
+
+ def run(self):
+ while True:
+ if not [a for a in self.actions]:
+ break
+
+ # Iterate over a copy, because remove_action() modifies the
+ # underlying list of actions while we are walking over it.
+ for action in list(self.actions):
+ if action.deps:
+ #logging.debug("Skipping %s which cannot be run now." % action)
+ continue
+
+ self.run_action(action)
+ self.remove_action(action)
+
+ def dump_pkg(self, format, pkg):
+ return format % (
+ pkg.name,
+ pkg.arch,
+ pkg.friendly_version,
+ pkg.repo,
+ util.format_size(pkg.size),
+ )
+
+ def dump(self):
+ width = 80
+ line = "=" * width
+ format = " %-22s %-13s %-21s %-14s %4s "
+
+ s = []
+ s.append(line)
+ s.append(format % (_("Package"), _("Arch"), _("Version"), _("Repository"), _("Size")))
+ s.append(line)
+
+ if self.installs:
+ s.append(_("Installing:"))
+ for pkg in self.installs:
+ s.append(self.dump_pkg(format, pkg))
+ s.append("")
+
+ if self.updates:
+ s.append(_("Updating:"))
+ for pkg in self.updates:
+ s.append(self.dump_pkg(format, pkg))
+ s.append("")
+
+ if self.removes:
+ s.append(_("Removing:"))
+ for pkg in self.removes:
+ s.append(self.dump_pkg(format, pkg))
+ s.append("")
+
+ s.append(_("Transaction Summary"))
+ s.append(line)
+
+ format = "%-20s %-4d %s"
+
+ if self.installs:
+ s.append(format % (_("Install"), len(self.installs), _("Package(s)")))
+
+ if self.updates:
+ s.append(format % (_("Updates"), len(self.updates), _("Package(s)")))
+
+ if self.removes:
+ s.append(format % (_("Remove"), len(self.removes), _("Package(s)")))
+
+ download_size = sum([p.size for p in self.installs + self.updates])
+ s.append(_("Total download size: %s") % util.format_size(download_size))
+ s.append("")
+
+ print "\n".join(s)
--- /dev/null
+#!/usr/bin/python
+
+import ctypes
+import fcntl
+import os
+import random
+import select
+import shutil
+import string
+import subprocess
+import sys
+import time
+
+from errors import Error
+from packages.util import format_size
+
+_libc = ctypes.cdll.LoadLibrary(None)
+_errno = ctypes.c_int.in_dll(_libc, "errno")
+_libc.personality.argtypes = [ctypes.c_ulong]
+_libc.personality.restype = ctypes.c_int
+_libc.unshare.argtypes = [ctypes.c_int,]
+_libc.unshare.restype = ctypes.c_int
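+# Mount namespace flag for unshare(2), as defined in <linux/sched.h>.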
+CLONE_NEWNS = 0x00020000
+
+def rm(path, *args, **kargs):
+ """
+ version of shutil.rmtree that ignores no-such-file-or-directory errors,
+ and tries harder if it finds immutable files
+ """
+ tryAgain = 1
+ failedFilename = None
+ while tryAgain:
+ tryAgain = 0
+ try:
+ shutil.rmtree(path, *args, **kargs)
+ except OSError, e:
+ if e.errno == 2: # no such file or directory
+ pass
+ elif e.errno==1 or e.errno==13:
+ tryAgain = 1
+ if failedFilename == e.filename:
+ raise
+ failedFilename = e.filename
+ os.system("chattr -R -i %s" % path)
+ else:
+ raise
+
+def logOutput(fds, logger, returnOutput=1, start=0, timeout=0):
+ output=""
+ done = 0
+
+ # set all fds to nonblocking
+ for fd in fds:
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ if not fd.closed:
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags| os.O_NONBLOCK)
+
+ tail = ""
+ while not done:
+ if (time.time() - start) > timeout and timeout != 0:
+ done = 1
+ break
+
+ i_rdy, o_rdy, e_rdy = select.select(fds, [], [], 1)
+ for s in i_rdy:
+ # slurp as much input as is ready
+ input = s.read()
+ if input == "":
+ done = 1
+ break
+ if logger is not None:
+ lines = input.split("\n")
+ if tail:
+ lines[0] = tail + lines[0]
+ # we may not have all of the last line
+ tail = lines.pop()
+ for line in lines:
+ if line == '': continue
+ logger.info(line)
+ for h in logger.handlers:
+ h.flush()
+ if returnOutput:
+ output += input
+ if tail and logger is not None:
+ logger.debug(tail)
+ return output
+
+
+def do(command, shell=False, chrootPath=None, cwd=None, timeout=0, raiseExc=True, returnOutput=0, personality=None, logger=None, env=None, *args, **kargs):
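+ # Run `command` in a subprocess, optionally inside a chroot and with a
+ # different personality, streaming its output to `logger`.
+ # Illustrative call (the chroot path and logger are placeholders):
+ #   do("make install", chrootPath="/var/tmp/build-root", shell=True,
+ #      returnOutput=1, logger=some_logger)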
+ # Save the output of command
+ output = ""
+
+ # Save time when command was started
+ start = time.time()
+
+ # Create preexecution thingy for command
+ preexec = ChildPreExec(personality, chrootPath, cwd)
+
+ if logger:
+ logger.debug("Executing command: %s" % command)
+
+ try:
+ child = None
+
+ # Create new child process
+ child = subprocess.Popen(
+ command,
+ shell=shell,
+ bufsize=0, close_fds=True,
+ stdin=open("/dev/null", "r"),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn = preexec,
+ env=env
+ )
+
+ # use select() to poll for output so we don't block
+ output = logOutput([child.stdout, child.stderr], logger, returnOutput, start, timeout)
+
+ except:
+ # kill children if they aren't done
+ if child and child.returncode is None:
+ os.killpg(child.pid, 9)
+ try:
+ if child:
+ os.waitpid(child.pid, 0)
+ except:
+ pass
+ raise
+
+ # wait until child is done, kill it if it passes timeout
+ niceExit=1
+ while child.poll() is None:
+ if (time.time() - start) > timeout and timeout != 0:
+ niceExit = 0
+ os.killpg(child.pid, 15)
+ if (time.time() - start) > (timeout+1) and timeout != 0:
+ niceExit = 0
+ os.killpg(child.pid, 9)
+
+ if not niceExit:
+ raise Error, ("Timeout(%s) expired for command:\n # %s\n%s" % (timeout, command, output))
+
+ if logger:
+ logger.debug("Child returncode was: %s" % str(child.returncode))
+
+ if raiseExc and child.returncode:
+ if returnOutput:
+ raise Error, ("Command failed: \n # %s\n%s" % (command, output), child.returncode)
+ else:
+ raise Error, ("Command failed. See logs for output.\n # %s" % (command,), child.returncode)
+
+ return output
+
+class ChildPreExec(object):
+ def __init__(self, personality, chrootPath, cwd):
+ self._personality = personality
+ self.chrootPath = chrootPath
+ self.cwd = cwd
+
+ @property
+ def personality(self):
+ """
+ Return personality value if supported.
+ Otherwise return None.
+ """
+ # taken from sys/personality.h
+ personality_defs = {
+ "linux64": 0x0000,
+ "linux32": 0x0008,
+ }
+
+ try:
+ return personality_defs[self._personality]
+ except KeyError:
+ pass
+
+ def __call__(self, *args, **kargs):
+ # Set a new process group
+ os.setpgrp()
+
+ # Set new personality if we got one.
+ if self.personality:
+ res = _libc.personality(self.personality)
+ if res == -1:
+ raise OSError(_errno.value, os.strerror(_errno.value))
+
+ # Change into new root.
+ if self.chrootPath:
+ os.chdir(self.chrootPath)
+ os.chroot(self.chrootPath)
+
+ # Change to cwd.
+ if self.cwd:
+ os.chdir(self.cwd)
+
--- /dev/null
+pakfire/i18n.py
+pakfire/distro.py
+pakfire/packages/packager.py
+pakfire/packages/source.py
+pakfire/packages/binary.py
+pakfire/packages/_io_.py
+pakfire/packages/listing.py
+pakfire/packages/__init__.py
+pakfire/packages/virtual.py
+pakfire/packages/base.py
+pakfire/packages/util.py
+pakfire/constants.py
+pakfire/repository.py
+pakfire/transaction.py
+pakfire/database.py
+pakfire/__init__.py
+pakfire/cli.py
+pakfire/index.py
+pakfire/base.py
+pakfire/env.py
+pakfire/logger.py
+pakfire/config.py
+pakfire/depsolve.py
+pakfire/util.py
+pakfire/plugins/__init__.py
+
--- /dev/null
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-01-27 19:55+0100\n"
+"PO-Revision-Date: 2011-01-27 20:25+0100\n"
+"Last-Translator: Michael Tremer <michael.tremer@ipfire.org>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"Language: de_DE\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: ../pakfire/packages/packager.py:109
+msgid "Extracting:"
+msgstr "Extrahiere:"
+
+#: ../pakfire/packages/base.py:42
+msgid "Name"
+msgstr "Name"
+
+#: ../pakfire/packages/base.py:43
+#: ../pakfire/transaction.py:214
+msgid "Arch"
+msgstr "Arch"
+
+#: ../pakfire/packages/base.py:44
+#: ../pakfire/transaction.py:214
+msgid "Version"
+msgstr "Version"
+
+#: ../pakfire/packages/base.py:45
+msgid "Release"
+msgstr "Release"
+
+#: ../pakfire/packages/base.py:46
+#: ../pakfire/transaction.py:214
+msgid "Size"
+msgstr "Größe"
+
+#. (_("Repo"), self.repo),
+#: ../pakfire/packages/base.py:48
+msgid "Summary"
+msgstr "Zusammenfassung"
+
+#. (_("URL"), self.url),
+#: ../pakfire/packages/base.py:50
+msgid "License"
+msgstr "Lizenz"
+
+#: ../pakfire/packages/base.py:53
+msgid "Description"
+msgstr "Beschreibung"
+
+#: ../pakfire/transaction.py:214
+msgid "Package"
+msgstr "Paket"
+
+#: ../pakfire/transaction.py:214
+msgid "Repository"
+msgstr "Repositorium"
+
+#: ../pakfire/transaction.py:218
+msgid "Installing:"
+msgstr "Installiere:"
+
+#: ../pakfire/transaction.py:224
+msgid "Updating:"
+msgstr "Aktualisiere:"
+
+#: ../pakfire/transaction.py:230
+msgid "Removing:"
+msgstr "Entferne:"
+
+#: ../pakfire/transaction.py:235
+msgid "Transaction Summary"
+msgstr "Zusammenfassung der Transaktion"
+
+#: ../pakfire/transaction.py:241
+msgid "Install"
+msgstr "Installieren"
+
+#: ../pakfire/transaction.py:241
+#: ../pakfire/transaction.py:244
+#: ../pakfire/transaction.py:247
+msgid "Package(s)"
+msgstr "Paket(e)"
+
+#: ../pakfire/transaction.py:244
+msgid "Updates"
+msgstr "Aktualisierungen"
+
+#: ../pakfire/transaction.py:247
+msgid "Remove"
+msgstr "Entfernen"
+
+#: ../pakfire/transaction.py:250
+#, python-format
+msgid "Total download size: %s"
+msgstr "Gesamte Downloadgröße: %s"
+
+#: ../pakfire/__init__.py:88
+msgid "Is this okay?"
+msgstr "Ist dies in Ordnung?"
+
+#: ../pakfire/cli.py:20
+#, python-format
+msgid "%s [y/N]"
+msgstr "%s [y/N]"
+
+#: ../pakfire/cli.py:29
+msgid "Pakfire command line interface."
+msgstr "Pakfire-Kommandozeilen-Interface"
+
+#: ../pakfire/cli.py:33
+#: ../pakfire/cli.py:84
+msgid "Enable verbose output."
+msgstr "Verbose-Modus einschalten."
+
+#: ../pakfire/cli.py:36
+#: ../pakfire/cli.py:87
+msgid "The path where pakfire should operate in."
+msgstr "Der Pfad in welchem Pakfire Änderungen vornimmt."
+
+#: ../pakfire/cli.py:43
+#: ../pakfire/cli.py:94
+msgid "Install one or more packages to the system."
+msgstr "Ein oder mehrere Pakete auf dem System installieren."
+
+#: ../pakfire/cli.py:45
+#: ../pakfire/cli.py:96
+msgid "Give name of at least one package to install."
+msgstr "Mindestens ein Paketname."
+
+#: ../pakfire/cli.py:50
+#: ../pakfire/cli.py:101
+msgid "Update the whole system or one specific package."
+msgstr "Das gesamte System aktualisieren oder ein angegebenes Paket."
+
+#: ../pakfire/cli.py:52
+#: ../pakfire/cli.py:103
+msgid "Give a name of a package to update or leave emtpy for all."
+msgstr "Ein Paket zum Aktualisieren oder leer lassen für alle."
+
+#: ../pakfire/cli.py:57
+#: ../pakfire/cli.py:108
+msgid "Print some information about the given package(s)."
+msgstr "Informationen über das/die angegebenen Paket(e) ausgeben."
+
+#: ../pakfire/cli.py:59
+#: ../pakfire/cli.py:110
+msgid "Give at least the name of one package."
+msgstr "Mindestens ein Paket."
+
+#: ../pakfire/cli.py:64
+#: ../pakfire/cli.py:115
+msgid "Search for a given pattern."
+msgstr "Nach einem Ausdruck suchen."
+
+#: ../pakfire/cli.py:66
+#: ../pakfire/cli.py:117
+msgid "A pattern to search for."
+msgstr "Nach einem Ausdruck suchen."
+
+#: ../pakfire/cli.py:80
+msgid "Pakfire builder command line interface."
+msgstr "Pakfire-builder Kommandozeilen-Interface."
+
--- /dev/null
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2011-01-27 20:25+0100\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: ../pakfire/packages/packager.py:109
+msgid "Extracting:"
+msgstr ""
+
+#: ../pakfire/packages/base.py:42
+msgid "Name"
+msgstr ""
+
+#: ../pakfire/packages/base.py:43 ../pakfire/transaction.py:214
+msgid "Arch"
+msgstr ""
+
+#: ../pakfire/packages/base.py:44 ../pakfire/transaction.py:214
+msgid "Version"
+msgstr ""
+
+#: ../pakfire/packages/base.py:45
+msgid "Release"
+msgstr ""
+
+#: ../pakfire/packages/base.py:46 ../pakfire/transaction.py:214
+msgid "Size"
+msgstr ""
+
+#. (_("Repo"), self.repo),
+#: ../pakfire/packages/base.py:48
+msgid "Summary"
+msgstr ""
+
+#. (_("URL"), self.url),
+#: ../pakfire/packages/base.py:50
+msgid "License"
+msgstr ""
+
+#: ../pakfire/packages/base.py:53
+msgid "Description"
+msgstr ""
+
+#: ../pakfire/transaction.py:214
+msgid "Package"
+msgstr ""
+
+#: ../pakfire/transaction.py:214
+msgid "Repository"
+msgstr ""
+
+#: ../pakfire/transaction.py:218
+msgid "Installing:"
+msgstr ""
+
+#: ../pakfire/transaction.py:224
+msgid "Updating:"
+msgstr ""
+
+#: ../pakfire/transaction.py:230
+msgid "Removing:"
+msgstr ""
+
+#: ../pakfire/transaction.py:235
+msgid "Transaction Summary"
+msgstr ""
+
+#: ../pakfire/transaction.py:241
+msgid "Install"
+msgstr ""
+
+#: ../pakfire/transaction.py:241 ../pakfire/transaction.py:244
+#: ../pakfire/transaction.py:247
+msgid "Package(s)"
+msgstr ""
+
+#: ../pakfire/transaction.py:244
+msgid "Updates"
+msgstr ""
+
+#: ../pakfire/transaction.py:247
+msgid "Remove"
+msgstr ""
+
+#: ../pakfire/transaction.py:250
+#, python-format
+msgid "Total download size: %s"
+msgstr ""
+
+#: ../pakfire/__init__.py:88
+msgid "Is this okay?"
+msgstr ""
+
+#: ../pakfire/cli.py:20
+#, python-format
+msgid "%s [y/N]"
+msgstr ""
+
+#: ../pakfire/cli.py:29
+msgid "Pakfire command line interface."
+msgstr ""
+
+#: ../pakfire/cli.py:33 ../pakfire/cli.py:84
+msgid "Enable verbose output."
+msgstr ""
+
+#: ../pakfire/cli.py:36 ../pakfire/cli.py:87
+msgid "The path where pakfire should operate in."
+msgstr ""
+
+#: ../pakfire/cli.py:43 ../pakfire/cli.py:94
+msgid "Install one or more packages to the system."
+msgstr ""
+
+#: ../pakfire/cli.py:45 ../pakfire/cli.py:96
+msgid "Give name of at least one package to install."
+msgstr ""
+
+#: ../pakfire/cli.py:50 ../pakfire/cli.py:101
+msgid "Update the whole system or one specific package."
+msgstr ""
+
+#: ../pakfire/cli.py:52 ../pakfire/cli.py:103
+msgid "Give a name of a package to update or leave emtpy for all."
+msgstr ""
+
+#: ../pakfire/cli.py:57 ../pakfire/cli.py:108
+msgid "Print some information about the given package(s)."
+msgstr ""
+
+#: ../pakfire/cli.py:59 ../pakfire/cli.py:110
+msgid "Give at least the name of one package."
+msgstr ""
+
+#: ../pakfire/cli.py:64 ../pakfire/cli.py:115
+msgid "Search for a given pattern."
+msgstr ""
+
+#: ../pakfire/cli.py:66 ../pakfire/cli.py:117
+msgid "A pattern to search for."
+msgstr ""
+
+#: ../pakfire/cli.py:80
+msgid "Pakfire builder command line interface."
+msgstr ""
--- /dev/null
+#!/usr/bin/python
+
+import logging
+import os
+import sys
+
+from pakfire.cli import Cli, CliBuilder
+
+basename2cls = {
+ "pakfire" : Cli,
+ "pakfire-build" : CliBuilder,
+}
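+
+# Note: scripts/pakfire-build is shipped as a symlink to this script (see the
+# pakfire-build file further below), so the basename chosen at invocation
+# decides which command line interface is used.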
+
+# Get the basename of the program
+basename = os.path.basename(sys.argv[0])
+
+# Check if the program was called with a weird basename.
+# If so, we exit immediately.
+if not basename2cls.has_key(basename):
+ sys.exit(127)
+
+# Creating command line interface
+cli = basename2cls[basename]()
+
+# Return code for the shell.
+ret = 0
+
+try:
+ cli.run()
+
+except KeyboardInterrupt:
+ logging.critical("Recieved keyboard interupt (Ctrl-C). Exiting.")
+ ret = 1
+
+sys.exit(ret)
+
--- /dev/null
+pakfire
\ No newline at end of file
--- /dev/null
+
+from distutils.core import setup
+
+from DistUtilsExtra.command import *
+
+setup(
+ name = "pakfire",
+ version = "0.0.1",
+ description = "Pakfire - Package manager for IPFire.",
+ author = "IPFire.org Team",
+ author_email = "info@ipfire.org",
+ url = "http://redmine.ipfire.org/projects/buildsystem3",
+ packages = ["pakfire"],
+ scripts = ["scripts/pakfire", "scripts/pakfire-build"],
+ cmdclass = { "build" : build_extra.build_extra,
+ "build_i18n" : build_i18n.build_i18n },
+)
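+
+# Typical invocation (assumed): "python setup.py build" followed by
+# "python setup.py install"; the build_i18n command from DistUtilsExtra
+# builds the gettext catalogs as part of "build".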