import os.path
+PAKFIRE_VERSION = "testing"
+
SYSCONFDIR = os.path.join(os.path.dirname(__file__), "..", "examples")
if not os.path.exists(SYSCONFDIR):
SYSCONFDIR = "/etc"
from urlgrabber.grabber import URLGrabber, URLGrabError
from urlgrabber.mirror import MirrorGroup
+from urlgrabber.progress import TextMeter
from constants import *
"""
Class to make some modifications on the urlgrabber configuration.
"""
- # XXX add proxy, user_agent, keep-alive, throttle things here
- pass
+ # XXX add proxy, throttle things here
+ def __init__(self, *args, **kwargs):
+ kwargs.update({
+ "quote" : 0,
+ "user_agent" : "pakfire/%s" % PAKFIRE_VERSION,
+ })
+
+ URLGrabber.__init__(self, *args, **kwargs)
+
+
+class PackageDownloader(PakfireGrabber):
+ def __init__(self, *args, **kwargs):
+ kwargs.update({
+ "progress_obj" : TextMeter(),
+ })
+
+ PakfireGrabber.__init__(self, *args, **kwargs)
+
+
+class MetadataDownloader(PakfireGrabber):
+ def __init__(self, *args, **kwargs):
+ kwargs.update({
+ "http_headers" : (('Pragma', 'no-cache'),),
+ })
+
+ PakfireGrabber.__init__(self, *args, **kwargs)
+
+
+class DatabaseDownloader(PackageDownloader):
+ def __init__(self, *args, **kwargs):
+ kwargs.update({
+ "http_headers" : (('Pragma', 'no-cache'),),
+ })
+
+ PackageDownloader.__init__(self, *args, **kwargs)
class Mirror(object):
force = True
if force:
- g = PakfireGrabber()
+ g = MetadataDownloader()
try:
mirrordata = g.urlread(self.mirrorlist, limit=MIRRORLIST_MAXSIZE)
import shutil
import database
+import downloader
import packages
import repository
import util
from constants import *
+from i18n import _
class Index(object):
def __init__(self, pakfire, repo):
download = False
if download:
+ # Initialize a grabber for download.
+ grabber = downloader.MetadataDownloader()
+ grabber = self.repo.mirrors.group(grabber)
+
# XXX do we need limit here for security reasons?
- metadata = self.repo.grabber.urlread("repodata/repomd.json")
+ metadata = grabber.urlread("repodata/repomd.json")
with cache.open(cache_filename, "w") as o:
o.write(metadata)
cache_filename = "metadata/packages.db" # XXX just for now
if not cache.exists(cache_filename):
+ # Initialize a grabber for download.
+ grabber = downloader.DatabaseDownloader(
+ text = _("%s: package database") % self.repo.name,
+ )
+ grabber = self.repo.mirrors.group(grabber)
+
+ i = grabber.urlopen("repodata/packages.db") # XXX just for now
o = cache.open(cache_filename, "w")
- i = self.repo.grabber.urlopen("repodata/packages.db") # XXX just for now
-
+
buf = i.read(BUFFER_SIZE)
while buf:
o.write(buf)
import os
+import pakfire.downloader
+
from base import Package
from binary import BinaryPackage
c.close()
- def download(self):
+ def download(self, text=""):
"""
Downloads the package from repository and returns a new instance
of BinaryPackage.
cache.remove(cache_filename)
if download:
- # Open input and output files and download the file.
- o = cache.open(cache_filename, "w")
# Make sure filename is of type string (and not unicode)
filename = str(self.filename)
- i = self.repo.grabber.urlopen(filename)
+ # Get a package grabber and add mirror download capabilities to it.
+ grabber = pakfire.downloader.PackageDownloader(
+ text=text + os.path.basename(filename),
+ )
+ grabber = self.repo.mirrors.group(grabber)
+
+ i = grabber.urlopen(filename)
+
+ # Open input and output files and download the file.
+ o = cache.open(cache_filename, "w")
buf = i.read(BUFFER_SIZE)
while buf:
import time
from ConfigParser import ConfigParser
-from urlgrabber.progress import TextMeter, TextMultiFileMeter
import base
import database
return priority
- @property
- def grabber(self):
- if not self.__grabber:
- grabber = downloader.PakfireGrabber(
-# progress_obj = TextMultiFileMeter(), # XXX broken?
- progress_obj = TextMeter(),
- )
-
- self.__grabber = self.mirrors.group(grabber)
-
- return self.__grabber
-
def update_index(self, force=False):
if self.index:
self.index.update(force=force)
"""
Convert all packages to BinaryPackage.
"""
- for download in self.downloads:
- pkg = download.download()
+ pkgs = []
+ for pkg in self.downloads:
+ pkgs.append(pkg)
+
+ # If there are no packages to download skip the rest.
+ if not pkgs:
+ return
+
+ logging.info("Downloading packages:")
+ i = 0
+ for download in pkgs:
+ i += 1
+ pkg = download.download(text="(%2d/%02d): " % (i, len(pkgs)))
for download_list in self.download_lists:
if download in download_list:
download_list.append(pkg)
break
+	# Just an empty line to separate the downloads from the extractions.
+ logging.info("")
+
class Transaction(object):
def __init__(self, pakfire, ds):
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-02-22 17:27+0100\n"
+"POT-Creation-Date: 2011-02-22 21:49+0100\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
msgid "Remove"
msgstr ""
-#: ../pakfire/depsolve.py:240
+#: ../pakfire/depsolve.py:242
#, python-format
msgid "Total download size: %s"
msgstr ""
+#: ../pakfire/index.py:180
+#, python-format
+msgid "%s: package database"
+msgstr ""
+
#: ../pakfire/__init__.py:172
msgid "Is this okay?"
msgstr ""