"""
return self.build.build_repo.pakfire(arch=self.arch, **kwargs)
- # Perform only four dependency checks at a time
- __depcheck_ratelimiter = asyncio.Semaphore(4)
-
async def depcheck(self):
	"""
	Performs the dependency check for this job.

	Delegates to the build repository's installcheck() so that
	checks for many jobs can share one Pakfire instance per
	architecture. Returns the result of the check.
	"""
	repo = self.build.build_repo

	return await repo.installcheck([self])
def _installcheck(self, p):
	"""
	Implementation that takes an active Pakfire instance and
	performs a check on whether the source package can be installed.

	Stores the outcome in the "depcheck_succeeded" attribute (and the
	error message on failure), records the timestamp on success, and
	returns the final status.
	"""
	log.info("Performing install check for %s (%s)" % (self, self.uuid))

	# Fetch the commandline repository
	repo = p.get_repo("@commandline")

	# Open the archive
	archive = p.open(self.pkg.path)

	# Fetch the package
	package = archive.get_package(repo)

	# Perform the installcheck
	try:
		package.installcheck()

	# Store any dependency errors
	except DependencyError as e:
		self._set_attribute("depcheck_succeeded", False)

		# Store the message
		self._set_attribute("message", "%s" % e)

	# Any other exception propagates to the caller unchanged.
	# (The previous "except Exception as e: raise e" was a no-op that
	# only rewrote the traceback origin, so the clause was removed.)

	# Everything OK
	else:
		self._set_attribute("depcheck_succeeded", True)

		# Store the timestamp
		self._set_attribute_now("depcheck_performed_at")

	# Return the status
	return self.depcheck_succeeded
import asyncio
import configparser
+import contextlib
import datetime
import io
import logging
await self._relaunch_pending_jobs()
# Perform this for all child repositories
- for repo in self.children:
- await repo._relaunch_pending_jobs()
+ async with asyncio.TaskGroup() as tasks:
+ for repo in self.children:
+ tasks.create_task(repo._relaunch_pending_jobs())
async def _relaunch_pending_jobs(self):
	"""
	Re-evaluates all pending jobs of this repository and, if any of
	them passed the installation check, asks the queue to dispatch.
	"""
	log.debug("%s: Relaunching pending jobs" % self)

	# Run the installation check over everything that is still pending;
	# if at least one job passed we will try to dispatch it
	if await self.installcheck(self.pending_jobs):
		self.backend.run_task(self.backend.jobs.queue.dispatch)
async def installcheck(self, jobs):
	"""
	Performs an installation check for all given jobs.

	The blocking work runs on a worker thread so the event loop
	stays responsive; returns True if any job passed the check.
	"""
	result = await asyncio.to_thread(self._installcheck, jobs)

	return result
+
def _installcheck(self, jobs):
	"""
	Implementation of installcheck which is performed on all given jobs.

	Returns True if at least one job passed its check.
	"""
	# A single ExitStack lets us hold an arbitrary number of Pakfire
	# instances open at the same time, so Pakfire only has to be
	# initialized once per architecture while we walk through the
	# jobs one after the other.
	with contextlib.ExitStack() as stack:
		# Pakfire instances, keyed by architecture
		instances = {}

		# Whether any job passed the check
		any_passed = False

		for job in jobs:
			# Re-use the instance for this architecture, creating
			# it on first use
			try:
				pakfire = instances[job.arch]
			except KeyError:
				pakfire = instances[job.arch] = stack.enter_context(
					self.pakfire(arch=job.arch)
				)

			# Each job is checked inside its own database transaction
			with self.db.transaction():
				if job._installcheck(pakfire):
					any_passed = True

		return any_passed
# Write repository