git.ipfire.org Git - pakfire.git/commitdiff
Improve downloader.
author Michael Tremer <michael.tremer@ipfire.org>
Tue, 22 Feb 2011 20:53:36 +0000 (21:53 +0100)
committer Michael Tremer <michael.tremer@ipfire.org>
Tue, 22 Feb 2011 20:53:36 +0000 (21:53 +0100)
We now have different downloaders for the different types of data,
e.g. metadata and packages, which come with different properties.
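
As a minimal usage sketch (not part of this commit): how the new downloader
classes are meant to be picked per data type. The "repo" object and the
example package name below are hypothetical; the calls themselves
(mirrors.group(), urlread(), urlopen(), the text label) follow the code in
index.py and packages/installed.py.

import pakfire.downloader as downloader

# "repo" stands in for a RemoteRepository instance (hypothetical in this sketch).
# Metadata is fetched without a progress meter and with a Pragma: no-cache
# header, so proxies do not serve stale repository metadata.
grabber = downloader.MetadataDownloader()
grabber = repo.mirrors.group(grabber)
metadata = grabber.urlread("repodata/repomd.json")

# Packages are fetched with a TextMeter progress bar; the text label is
# shown next to the bar.
grabber = downloader.PackageDownloader(text="(1/1): example-1.0.pkg")
grabber = repo.mirrors.group(grabber)
f = grabber.urlopen("example-1.0.pkg")

DatabaseDownloader combines both: the package-style progress meter plus the
no-cache header, which is what DatabaseIndex uses to fetch repodata/packages.db.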

pakfire/constants.py
pakfire/downloader.py
pakfire/index.py
pakfire/packages/installed.py
pakfire/repository.py
pakfire/transaction.py
po/pakfire.pot

diff --git a/pakfire/constants.py b/pakfire/constants.py
index 1756c2af2973e1882067b91f4eec9387423feea4..2e3fbdc76b43c5a44cc57b8a0ff77b3ac8b4d29f 100644
@@ -2,6 +2,8 @@
 
 import os.path
 
+PAKFIRE_VERSION = "testing"
+
 SYSCONFDIR = os.path.join(os.path.dirname(__file__), "..", "examples")
 if not os.path.exists(SYSCONFDIR):
        SYSCONFDIR = "/etc"
diff --git a/pakfire/downloader.py b/pakfire/downloader.py
index 31a72652b33d551295283ba9b9f2e9534010ab6f..b2d9991c24864b9c524a9aef884df8794192aa73 100644
@@ -6,6 +6,7 @@ import random
 
 from urlgrabber.grabber import URLGrabber, URLGrabError
 from urlgrabber.mirror import MirrorGroup
+from urlgrabber.progress import TextMeter
 
 from constants import *
 
@@ -15,9 +16,42 @@ class PakfireGrabber(URLGrabber):
        """
                Class to make some modifications on the urlgrabber configuration.
        """
-       # XXX add proxy, user_agent, keep-alive, throttle things here
-       pass
+       # XXX add proxy, throttle things here
 
+       def __init__(self, *args, **kwargs):
+               kwargs.update({
+                       "quote" : 0,
+                       "user_agent" : "pakfire/%s" % PAKFIRE_VERSION,
+               })
+
+               URLGrabber.__init__(self, *args, **kwargs)
+
+
+class PackageDownloader(PakfireGrabber):
+       def __init__(self, *args, **kwargs):
+               kwargs.update({
+                               "progress_obj" : TextMeter(),
+               })
+
+               PakfireGrabber.__init__(self, *args, **kwargs)
+
+
+class MetadataDownloader(PakfireGrabber):
+       def __init__(self, *args, **kwargs):
+               kwargs.update({
+                       "http_headers" : (('Pragma', 'no-cache'),),
+               })
+
+               PakfireGrabber.__init__(self, *args, **kwargs)
+
+
+class DatabaseDownloader(PackageDownloader):
+       def __init__(self, *args, **kwargs):
+               kwargs.update({
+                       "http_headers" : (('Pragma', 'no-cache'),),
+               })
+
+               PackageDownloader.__init__(self, *args, **kwargs)
 
 
 class Mirror(object):
@@ -73,7 +107,7 @@ class MirrorList(object):
                                force = True
 
                if force:
-                       g = PakfireGrabber()
+                       g = MetadataDownloader()
 
                        try:
                                mirrordata = g.urlread(self.mirrorlist, limit=MIRRORLIST_MAXSIZE)
diff --git a/pakfire/index.py b/pakfire/index.py
index 841511e8c0ae7e884675c8bdc6291dd486c6eb45..2e9de7c3c33e7d408b6d74b262abbb179fe97405 100644
@@ -6,11 +6,13 @@ import random
 import shutil
 
 import database
+import downloader
 import packages
 import repository
 import util
 
 from constants import *
+from i18n import _
 
 class Index(object):
        def __init__(self, pakfire, repo):
@@ -156,8 +158,12 @@ class DatabaseIndex(Index):
                                download = False
 
                if download:
+                       # Initialize a grabber for download.
+                       grabber = downloader.MetadataDownloader()
+                       grabber = self.repo.mirrors.group(grabber)
+
                        # XXX do we need limit here for security reasons?
-                       metadata = self.repo.grabber.urlread("repodata/repomd.json")
+                       metadata = grabber.urlread("repodata/repomd.json")
 
                        with cache.open(cache_filename, "w") as o:
                                o.write(metadata)
@@ -169,9 +175,15 @@ class DatabaseIndex(Index):
                cache_filename = "metadata/packages.db" # XXX just for now
 
                if not cache.exists(cache_filename):
+                       # Initialize a grabber for download.
+                       grabber = downloader.DatabaseDownloader(
+                               text = _("%s: package database") % self.repo.name,
+                       )
+                       grabber = self.repo.mirrors.group(grabber)
+
+                       i = grabber.urlopen("repodata/packages.db") # XXX just for now
                        o = cache.open(cache_filename, "w")
-                       i = self.repo.grabber.urlopen("repodata/packages.db") # XXX just for now
-                       
+
                        buf = i.read(BUFFER_SIZE)
                        while buf:
                                o.write(buf)
diff --git a/pakfire/packages/installed.py b/pakfire/packages/installed.py
index f7e6743094a69f670a739904de6f2db24eba89fb..a41bf40ebe8df7df08340067bd7a31161e8a7596 100644
@@ -2,6 +2,8 @@
 
 import os
 
+import pakfire.downloader
+
 from base import Package
 from binary import BinaryPackage
 
@@ -148,7 +150,7 @@ class DatabasePackage(Package):
 
                c.close()
 
-       def download(self):
+       def download(self, text=""):
                """
                        Downloads the package from repository and returns a new instance
                        of BinaryPackage.
@@ -172,12 +174,19 @@ class DatabasePackage(Package):
                                cache.remove(cache_filename)
 
                if download:
-                       # Open input and output files and download the file.
-                       o = cache.open(cache_filename, "w")
                        # Make sure filename is of type string (and not unicode)
                        filename = str(self.filename)
 
-                       i = self.repo.grabber.urlopen(filename)
+                       # Get a package grabber and add mirror download capabilities to it.
+                       grabber = pakfire.downloader.PackageDownloader(
+                               text=text + os.path.basename(filename),
+                       )
+                       grabber = self.repo.mirrors.group(grabber)
+
+                       i = grabber.urlopen(filename)
+
+                       # Open input and output files and download the file.
+                       o = cache.open(cache_filename, "w")
 
                        buf = i.read(BUFFER_SIZE)
                        while buf:
diff --git a/pakfire/repository.py b/pakfire/repository.py
index f92b28b30a66f0490320e24db72783e53ce78c84..dc780c60596d86f878425ce3b5109d525dc80435 100644
@@ -7,7 +7,6 @@ import stat
 import time
 
 from ConfigParser import ConfigParser
-from urlgrabber.progress import TextMeter, TextMultiFileMeter
 
 import base
 import database
@@ -488,18 +487,6 @@ class RemoteRepository(RepositoryFactory):
 
                return priority
 
-       @property
-       def grabber(self):
-               if not self.__grabber:
-                       grabber = downloader.PakfireGrabber(
-#                              progress_obj = TextMultiFileMeter(), # XXX broken?
-                               progress_obj = TextMeter(),
-                       )
-                       
-                       self.__grabber = self.mirrors.group(grabber)
-
-               return self.__grabber
-
        def update_index(self, force=False):
                if self.index:
                        self.index.update(force=force)
diff --git a/pakfire/transaction.py b/pakfire/transaction.py
index 546f267abc2798a463e9ab1ee5ec940c233947da..45634e9992bb6952b2866765a3af9825c4aa9eb1 100644
@@ -151,8 +151,19 @@ class TransactionSet(object):
                """
                        Convert all packages to BinaryPackage.
                """
-               for download in self.downloads:
-                       pkg = download.download()
+               pkgs = []
+               for pkg in self.downloads:
+                       pkgs.append(pkg)
+
+               # If there are no packages to download skip the rest.
+               if not pkgs:
+                       return
+
+               logging.info("Downloading packages:")
+               i = 0
+               for download in pkgs:
+                       i += 1
+                       pkg = download.download(text="(%2d/%02d): " % (i, len(pkgs)))
 
                        for download_list in self.download_lists:
                                if download in download_list:
@@ -160,6 +171,9 @@ class TransactionSet(object):
                                        download_list.append(pkg)
                                        break
 
+               # Just an empty line to separate the downloads from the extractions.
+               logging.info("")
+
 
 class Transaction(object):
        def __init__(self, pakfire, ds):
diff --git a/po/pakfire.pot b/po/pakfire.pot
index 61be700163e330a40a4a7d8b089dd078aaff32b9..ed6c65c1c86b5caed04d28b3069daaeeb19957fb 100644
@@ -8,7 +8,7 @@ msgid ""
 msgstr ""
 "Project-Id-Version: PACKAGE VERSION\n"
 "Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2011-02-22 17:27+0100\n"
+"POT-Creation-Date: 2011-02-22 21:49+0100\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
 "Language-Team: LANGUAGE <LL@li.org>\n"
@@ -215,11 +215,16 @@ msgstr ""
 msgid "Remove"
 msgstr ""
 
-#: ../pakfire/depsolve.py:240
+#: ../pakfire/depsolve.py:242
 #, python-format
 msgid "Total download size: %s"
 msgstr ""
 
+#: ../pakfire/index.py:180
+#, python-format
+msgid "%s: package database"
+msgstr ""
+
 #: ../pakfire/__init__.py:172
 msgid "Is this okay?"
 msgstr ""