13 import pakfire
.packages
17 from . import builders
19 from . import packages
20 from . import repository
24 from .constants
import *
25 from .decorators
import *
27 def import_from_package(_pakfire
, filename
, distro
=None, commit
=None, type="release",
28 arches
=None, check_for_duplicates
=True, owner
=None):
31 distro
= commit
.source
.distro
35 # Open the package file to read some basic information.
36 pkg
= pakfire
.packages
.open(None, None, filename
)
38 if check_for_duplicates
:
39 if distro
.has_package(pkg
.name
, pkg
.epoch
, pkg
.version
, pkg
.release
):
40 logging
.warning("Duplicate package detected: %s. Skipping." % pkg
)
43 # Open the package and add it to the database.
44 pkg
= packages
.Package
.open(_pakfire
, filename
)
45 logging
.debug("Created new package: %s" % pkg
)
47 # Associate the package to the processed commit.
51 # Create a new build object from the package which
52 # is always a release build.
53 build
= Build
.create(_pakfire
, pkg
, type=type, owner
=owner
, distro
=distro
)
54 logging
.debug("Created new build job: %s" % build
)
56 # Create all automatic jobs.
57 build
.create_autojobs(arches
=arches
)
62 class Builds(base
.Object
):
63 def _get_build(self
, query
, *args
):
64 res
= self
.db
.get(query
, *args
)
67 return Build(self
.backend
, res
.id, data
=res
)
69 def _get_builds(self
, query
, *args
):
70 res
= self
.db
.query(query
, *args
)
73 yield Build(self
.backend
, row
.id, data
=row
)
def get_by_id(self, id, data=None):
    """Return a Build wrapper for the given database id.

    ``data`` may carry an already-fetched database row to avoid a
    second lookup.
    """
    build = Build(self.pakfire, id, data=data)
    return build
78 def get_by_uuid(self
, uuid
):
79 build
= self
.db
.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid
)
82 return self
.get_by_id(build
.id)
84 def get_all(self
, limit
=50):
85 query
= "SELECT * FROM builds ORDER BY time_created DESC"
88 query
+= " LIMIT %d" % limit
90 return [self
.get_by_id(b
.id, b
) for b
in self
.db
.query(query
)]
92 def get_by_user(self
, user
, type=None, public
=None):
96 if not type or type == "scratch":
97 # On scratch builds the user id equals the owner id.
98 conditions
.append("(builds.type = 'scratch' AND owner_id = %s)")
101 elif not type or type == "release":
105 conditions
.append("public = 'Y'")
106 elif public
is False:
107 conditions
.append("public = 'N'")
109 query
= "SELECT builds.* AS id FROM builds \
110 JOIN packages ON builds.pkg_id = packages.id"
113 query
+= " WHERE %s" % " AND ".join(conditions
)
115 query
+= " ORDER BY builds.time_created DESC"
118 for build
in self
.db
.query(query
, *args
):
119 build
= Build(self
.pakfire
, build
.id, build
)
124 def get_by_name(self
, name
, type=None, public
=None, user
=None, limit
=None, offset
=None):
127 "packages.name = %s",
131 conditions
.append("builds.type = %s")
136 or_conditions
.append("public = 'Y'")
137 elif public
is False:
138 or_conditions
.append("public = 'N'")
140 if user
and not user
.is_admin():
141 or_conditions
.append("builds.owner_id = %s")
144 query
= "SELECT builds.* AS id FROM builds \
145 JOIN packages ON builds.pkg_id = packages.id"
148 conditions
.append(" OR ".join(or_conditions
))
151 query
+= " WHERE %s" % " AND ".join(conditions
)
153 if type == "release":
154 query
+= " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"
155 elif type == "scratch":
156 query
+= " ORDER BY time_created DESC"
160 query
+= " LIMIT %s,%s"
161 args
.extend([offset
, limit
])
166 return [Build(self
.pakfire
, b
.id, b
) for b
in self
.db
.query(query
, *args
)]
168 def get_latest_by_name(self
, name
, type=None, public
=None):
170 SELECT * FROM builds \
171 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
172 WHERE builds_latest.package_name = %s"
176 query
+= " AND builds_latest.build_type = %s"
180 query
+= " AND builds.public = %s"
182 elif public
is False:
183 query
+= " AND builds.public = %s"
186 # Get the last one only.
187 # Prefer release builds over scratch builds.
190 CASE builds.type WHEN 'release' THEN 0 ELSE 1 END, \
191 builds.time_created DESC \
194 res
= self
.db
.get(query
, *args
)
197 return Build(self
.pakfire
, res
.id, res
)
199 def get_active_builds(self
, name
, public
=None):
201 SELECT * FROM builds \
202 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
203 WHERE builds_latest.package_name = %s AND builds.type = %s"
204 args
= [name
, "release"]
207 query
+= " AND builds.public = %s"
209 elif public
is False:
210 query
+= " AND builds.public = %s"
214 for row
in self
.db
.query(query
, *args
):
215 b
= Build(self
.pakfire
, row
.id, row
)
218 # Sort the result. Lastest build first.
219 builds
.sort(reverse
=True)
224 builds
= self
.db
.get("SELECT COUNT(*) AS count FROM builds")
228 def get_obsolete(self
, repo
=None):
230 Get all obsoleted builds.
232 If repo is True: which are in any repository.
233 If repo is some Repository object: which are in this repository.
238 query
= "SELECT id FROM builds WHERE state = 'obsolete'"
241 query
= "SELECT build_id AS id FROM repositories_builds \
242 JOIN builds ON builds.id = repositories_builds.build_id \
243 WHERE builds.state = 'obsolete'"
245 if repo
and not repo
is True:
246 query
+= " AND repositories_builds.repo_id = %s"
249 res
= self
.db
.query(query
, *args
)
253 build
= Build(self
.pakfire
, build
.id)
258 def get_changelog(self
, name
, public
=None, limit
=5, offset
=0):
259 query
= "SELECT builds.* FROM builds \
260 JOIN packages ON builds.pkg_id = packages.id \
265 args
= ["release", name
,]
268 query
+= " AND builds.public = %s"
270 elif public
== False:
271 query
+= " AND builds.public = %s"
274 query
+= " ORDER BY builds.time_created DESC"
278 query
+= " LIMIT %s,%s"
279 args
+= [offset
, limit
]
285 for b
in self
.db
.query(query
, *args
):
286 b
= Build(self
.pakfire
, b
.id, b
)
289 builds
.sort(reverse
=True)
293 def get_comments(self
, limit
=10, offset
=None, user
=None):
294 query
= "SELECT * FROM builds_comments \
295 JOIN users ON builds_comments.user_id = users.id"
300 wheres
.append("users.id = %s")
304 query
+= " WHERE %s" % " AND ".join(wheres
)
307 query
+= " ORDER BY time_created DESC"
312 query
+= " LIMIT %s,%s"
320 for comment
in self
.db
.query(query
, *args
):
321 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
322 comments
.append(comment
)
326 def get_build_times_summary(self
, name
=None, job_type
=None, arch
=None):
329 builds_times.arch AS arch, \
330 MAX(duration) AS maximum, \
331 MIN(duration) AS minimum, \
332 AVG(duration) AS average, \
333 SUM(duration) AS sum, \
334 STDDEV_POP(duration) AS stddev \
336 LEFT JOIN builds ON builds_times.build_id = builds.id \
337 LEFT JOIN packages ON builds.pkg_id = packages.id"
344 conditions
.append("packages.name = %s")
347 # Filter by job types.
349 conditions
.append("builds_times.job_type = %s")
350 args
.append(job_type
)
354 conditions
.append("builds_times.arch = %s")
359 query
+= " WHERE %s" % " AND ".join(conditions
)
361 # Grouping and sorting.
362 query
+= " GROUP BY arch ORDER BY arch DESC"
364 return self
.db
.query(query
, *args
)
366 def get_build_times_by_arch(self
, arch
, **kwargs
):
371 build_times
= self
.get_build_times_summary(**kwargs
)
373 return build_times
[0]
376 class Build(base
.Object
):
377 def __init__(self
, pakfire
, id, data
=None):
378 base
.Object
.__init
__(self
, pakfire
)
386 self
._jobs
_test
= None
387 self
._depends
_on
= None
396 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.pkg
)
398 def __cmp__(self
, other
):
402 return cmp(self
.pkg
, other
.pkg
)
405 jobs
= self
.backend
.jobs
._get
_jobs
("SELECT * FROM jobs \
406 WHERE build_id = %s", self
.id)
408 return iter(sorted(jobs
))
411 def create(cls
, pakfire
, pkg
, type="release", owner
=None, distro
=None, public
=True):
412 assert type in ("release", "scratch", "test")
413 assert distro
, "You need to specify the distribution of this build."
420 # Check if scratch build has an owner.
421 if type == "scratch" and not owner
:
422 raise Exception, "Scratch builds require an owner"
424 # Set the default priority of this build.
425 if type == "release":
428 elif type == "scratch":
434 id = pakfire
.db
.execute("""
435 INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
436 VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid
.uuid4(), pkg
.id,
437 type, distro
.id, public
, priority
)
439 # Set the owner of this buildgroup.
441 pakfire
.db
.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
444 build
= cls(pakfire
, id)
446 # Log that the build has been created.
447 build
.log("created", user
=owner
)
449 # Create directory where the files live.
450 if not os
.path
.exists(build
.path
):
451 os
.makedirs(build
.path
)
453 # Move package file to the directory of the build.
454 source_path
= os
.path
.join(build
.path
, "src")
455 build
.pkg
.move(source_path
)
457 # Generate an update id.
458 build
.generate_update_id()
460 # Obsolete all other builds with the same name to track updates.
461 build
.obsolete_others()
463 # Search for possible bug IDs in the commit message.
464 build
.search_for_bugs()
470 Deletes this build including all jobs, packages and the source
473 # If the build is in a repository, we need to remove it.
475 self
.repo
.rem_build(self
)
477 for job
in self
.jobs
+ self
.test_jobs
:
483 # Delete everything related to this build.
485 self
.__delete
_comments
()
486 self
.__delete
_history
()
487 self
.__delete
_watchers
()
489 # Delete the build itself.
490 self
.db
.execute("DELETE FROM builds WHERE id = %s", self
.id)
def __delete_bugs(self):
    """
    Delete all associated bugs.
    """
    self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s", self.id)
def __delete_comments(self):
    """
    Delete every comment that was attached to this build.
    """
    self.db.execute("DELETE FROM builds_comments WHERE build_id = %s", self.id)
def __delete_history(self):
    """
    Delete the repository history.
    """
    self.db.execute("DELETE FROM repositories_history WHERE build_id = %s", self.id)
def __delete_watchers(self):
    """
    Delete all watchers of this build.
    """
    self.db.execute("DELETE FROM builds_watchers WHERE build_id = %s", self.id)
518 Resets the whole build so it can start again (as it has never
521 for job
in self
.jobs
:
524 #self.__delete_bugs()
525 self
.__delete
_comments
()
526 self
.__delete
_history
()
527 self
.__delete
_watchers
()
529 self
.state
= "building"
536 Lazy fetching of data for this object.
538 if self
._data
is None:
539 self
._data
= self
.db
.get("SELECT * FROM builds WHERE id = %s", self
.id)
547 A set of information that is sent to the XMLRPC client.
549 return { "uuid" : self
.uuid
}
551 def log(self
, action
, user
=None, bug_id
=None):
556 self
.db
.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
557 VALUES(%s, %s, %s, NOW(), %s)", self
.id, action
, user_id
, bug_id
)
562 The UUID of this build.
564 return self
.data
.uuid
569 Get package that is to be built in the build.
571 if self
._pkg
is None:
572 self
._pkg
= packages
.Package(self
.pakfire
, self
.data
.pkg_id
)
578 return "%s-%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
)
583 The type of this build.
585 return self
.data
.type
590 The ID of the owner of this build.
592 return self
.data
.owner_id
597 The owner of this build.
599 if not self
.owner_id
:
602 if self
._owner
is None:
603 self
._owner
= self
.pakfire
.users
.get_by_id(self
.owner_id
)
610 return self
.data
.distro_id
614 if self
._distro
is None:
615 self
._distro
= self
.pakfire
.distros
.get_by_id(self
.distro_id
)
622 if self
.type == "scratch":
625 def get_depends_on(self
):
626 if self
.data
.depends_on
and self
._depends
_on
is None:
627 self
._depends
_on
= Build(self
.pakfire
, self
.data
.depends_on
)
629 return self
._depends
_on
631 def set_depends_on(self
, build
):
632 self
.db
.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
636 self
._depends
_on
= build
637 self
._data
["depends_on"] = build
.id
639 depends_on
= property(get_depends_on
, set_depends_on
)
643 return self
.data
.time_created
647 return self
.created
.date()
652 Is this build public?
654 return self
.data
.public
== "Y"
659 Returns the size on disk of this build.
663 # Add the source package.
668 s
+= sum((j
.size
for j
in self
.jobs
))
674 # # Cache all states.
675 # states = [j.state for j in self.jobs]
677 # target_state = "unknown"
679 # # If at least one job has failed, the whole build has failed.
680 # if "failed" in states:
681 # target_state = "failed"
683 # # It at least one of the jobs is still running, the whole
684 # # build is in running state.
685 # elif "running" in states:
686 # target_state = "running"
688 # # If all jobs are in the finished state, we turn into finished
690 # elif all([s == "finished" for s in states]):
691 # target_state = "finished"
693 # return target_state
695 def auto_update_state(self
):
697 Check if the state of this build can be updated and perform
698 the change if possible.
700 # Do not change the broken/obsolete state automatically.
701 if self
.state
in ("broken", "obsolete"):
704 if self
.repo
and self
.repo
.type == "stable":
705 self
.update_state("stable")
708 # If any of the build jobs are finished, the build will be put in testing
710 for job
in self
.jobs
:
711 if job
.state
== "finished":
712 self
.update_state("testing")
715 def update_state(self
, state
, user
=None, remove
=False):
716 assert state
in ("stable", "testing", "obsolete", "broken")
718 self
.db
.execute("UPDATE builds SET state = %s WHERE id = %s", state
, self
.id)
721 self
._data
["state"] = state
723 # In broken state, the removal from the repository is forced and
724 # all jobs that are not finished yet will be aborted.
725 if state
== "broken":
728 for job
in self
.jobs
:
729 if job
.state
in ("new", "pending", "running", "dependency_error"):
730 job
.state
= "aborted"
732 # If this build is in a repository, it will leave it.
733 if remove
and self
.repo
:
734 self
.repo
.rem_build(self
)
736 # If a release build is now in testing state, we put it into the
737 # first repository of the distribution.
738 elif self
.type == "release" and state
== "testing":
739 # If the build is not in a repository, yet and if there is
740 # a first repository, we put the build there.
741 if not self
.repo
and self
.distro
.first_repo
:
742 self
.distro
.first_repo
.add_build(self
, user
=user
)
746 return self
.data
.state
749 return self
.state
== "broken"
751 def obsolete_others(self
):
752 if not self
.type == "release":
755 for build
in self
.pakfire
.builds
.get_by_name(self
.pkg
.name
, type="release"):
756 # Don't modify ourself.
757 if self
.id == build
.id:
760 # Don't touch broken builds.
761 if build
.state
in ("obsolete", "broken"):
764 # Obsolete the build.
765 build
.update_state("obsolete")
def set_severity(self, severity):
    """Persist a new severity for this build and refresh the cache.

    severity: the new severity value to store.
    """
    # BUG FIX: the original passed the undefined name ``state`` to the
    # query, raising NameError on every call; pass ``severity`` instead.
    self.db.execute("UPDATE builds SET severity = %s WHERE id = %s", severity,
        self.id)

    # Only refresh the cache when the row has already been fetched
    # (``_data`` is loaded lazily elsewhere in this class).
    if self._data:
        self._data["severity"] = severity

def get_severity(self):
    """Return the severity stored for this build."""
    return self.data.severity

severity = property(get_severity, set_severity)
780 if self
.pkg
and self
.pkg
.commit
:
781 return self
.pkg
.commit
783 def update_message(self
, msg
):
784 self
.db
.execute("UPDATE builds SET message = %s WHERE id = %s", msg
, self
.id)
787 self
._data
["message"] = msg
789 def has_perm(self
, user
):
791 Check, if the given user has the right to perform administrative
792 operations on this build.
800 # Check if the user is allowed to manage packages from the critical path.
801 if self
.critical_path
and not user
.has_perm("manage_critical_path"):
804 # Search for maintainers...
807 if self
.type == "scratch":
808 # The owner of a scratch build has the right to do anything with it.
809 if self
.owner_id
== user
.id:
813 elif self
.type == "release":
814 # The maintainer also is allowed to manage the build.
815 if self
.pkg
.maintainer
== user
:
818 # Deny permission for all other cases.
825 if self
.data
.message
:
826 message
= self
.data
.message
829 if self
.commit
.message
:
830 message
= "\n".join((self
.commit
.subject
, self
.commit
.message
))
832 message
= self
.commit
.subject
834 prefix
= "%s: " % self
.pkg
.name
835 if message
.startswith(prefix
):
836 message
= message
[len(prefix
):]
def get_priority(self):
    """Return the scheduling priority of this build (-2..2)."""
    return self.data.priority

def set_priority(self, priority):
    """Persist a new scheduling priority and refresh the cache.

    priority: must be one of -2, -1, 0, 1, 2.
    """
    # NOTE(review): kept as ``assert`` for caller compatibility, although
    # it is stripped under ``python -O``.
    assert priority in (-2, -1, 0, 1, 2)

    self.db.execute("UPDATE builds SET priority = %s WHERE id = %s", priority,
        self.id)

    # Only refresh the cache when the row has already been fetched
    # (``_data`` is loaded lazily elsewhere in this class).
    if self._data:
        self._data["priority"] = priority

priority = property(get_priority, set_priority)
857 if self
.type == "scratch":
858 path
.append(BUILD_SCRATCH_DIR
)
859 path
.append(self
.uuid
)
861 elif self
.type == "release":
862 path
.append(BUILD_RELEASE_DIR
)
863 path
.append("%s/%s-%s-%s" % \
864 (self
.pkg
.name
, self
.pkg
.epoch
, self
.pkg
.version
, self
.pkg
.release
))
867 raise Exception, "Unknown build type: %s" % self
.type
869 return os
.path
.join(*path
)
872 def source_filename(self
):
873 return os
.path
.basename(self
.pkg
.path
)
876 def download_prefix(self
):
877 return "/".join((self
.pakfire
.settings
.get("download_baseurl"), "packages"))
880 def source_download(self
):
881 return "/".join((self
.download_prefix
, self
.pkg
.path
))
884 def source_hash_sha512(self
):
885 return self
.pkg
.hash_sha512
889 # XXX maybe this should rather live in a uimodule.
890 # zlib-1.2.3-2.ip3 [src, i686, blah...]
891 s
= """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
892 (self
.state
, self
.type, self
.uuid
, self
.name
)
895 for job
in self
.jobs
:
896 s_jobs
.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
897 (job
.state
, job
.type, job
.uuid
, job
.arch
))
900 s
+= " [%s]" % ", ".join(s_jobs
)
905 def supported_arches(self
):
906 return self
.pkg
.supported_arches
909 def critical_path(self
):
910 return self
.pkg
.critical_path
912 def get_jobs(self
, type=None):
914 Returns a list of jobs of this build.
916 return self
.pakfire
.jobs
.get_by_build(self
.id, self
, type=type)
921 Get a list of all build jobs that are in this build.
923 if self
._jobs
is None:
924 self
._jobs
= self
.get_jobs(type="build")
930 if self
._jobs
_test
is None:
931 self
._jobs
_test
= self
.get_jobs(type="test")
933 return self
._jobs
_test
936 def all_jobs_finished(self
):
939 for job
in self
.jobs
:
940 if not job
.state
== "finished":
946 def create_autojobs(self
, arches
=None, type="build"):
949 # Arches may be passed to this function. If not we use all arches
950 # this package supports.
952 arches
= self
.supported_arches
954 # Create a new job for every given archirecture.
955 for arch
in self
.pakfire
.arches
.expand(arches
):
956 # Don't create jobs for src.
957 if arch
.name
== "src":
960 job
= self
.add_job(arch
, type=type)
963 # Return all newly created jobs.
966 def add_job(self
, arch
, type="build"):
967 job
= Job
.create(self
.pakfire
, self
, arch
, type=type)
969 # Add new job to cache.
971 self
._jobs
.append(job
)
979 if not self
.type == "release":
982 # Generate an update ID if none does exist, yet.
983 self
.generate_update_id()
986 "%s" % self
.distro
.name
.replace(" ", "").upper(),
987 "%04d" % (self
.data
.update_year
or 0),
988 "%04d" % (self
.data
.update_num
or 0),
993 def generate_update_id(self
):
994 if not self
.type == "release":
997 if self
.data
.update_num
:
1000 update
= self
.db
.get("SELECT update_num AS num FROM builds \
1001 WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")
1004 update_num
= update
.num
+ 1
1008 self
.db
.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
1009 WHERE id = %s", update_num
, self
.id)
1013 def get_comments(self
, limit
=10, offset
=0):
1014 query
= "SELECT * FROM builds_comments \
1015 JOIN users ON builds_comments.user_id = users.id \
1016 WHERE build_id = %s ORDER BY time_created ASC"
1019 for comment
in self
.db
.query(query
, self
.id):
1020 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
1021 comments
.append(comment
)
1025 def add_comment(self
, user
, text
, credit
):
1026 # Add the new comment to the database.
1027 id = self
.db
.execute("INSERT INTO \
1028 builds_comments(build_id, user_id, text, credit, time_created) \
1029 VALUES(%s, %s, %s, %s, NOW())",
1030 self
.id, user
.id, text
, credit
)
1032 # Update the credit cache.
1033 if not self
._credits
is None:
1034 self
._credits
+= credit
1036 # Send the new comment to all watchers and stuff.
1037 self
.send_comment_message(id)
1039 # Return the ID of the newly created comment.
1045 if self
._credits
is None:
1046 # Get the sum of the credits from the database.
1047 query
= self
.db
.get(
1048 "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s",
1052 self
._credits
= query
.credits
or 0
1054 return self
._credits
1061 def get_commenters(self
):
1062 users
= self
.db
.query("SELECT DISTINCT users.id AS id FROM builds_comments \
1063 JOIN users ON builds_comments.user_id = users.id \
1064 WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \
1065 AND NOT users.activated = 'Y' ORDER BY users.id", self
.id)
1067 return [users
.User(self
.pakfire
, u
.id) for u
in users
]
1069 def send_comment_message(self
, comment_id
):
1070 comment
= self
.db
.get("SELECT * FROM builds_comments WHERE id = %s",
1074 assert comment
.build_id
== self
.id
1076 # Get user who wrote the comment.
1077 user
= self
.pakfire
.users
.get_by_id(comment
.user_id
)
1080 "build_name" : self
.name
,
1081 "user_name" : user
.realname
,
1084 # XXX create beautiful message
1086 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
1087 N_("%(user_name)s commented on %(build_name)s"),
1088 comment
.text
, format
)
1092 def get_log(self
, comments
=True, repo
=True, limit
=None):
1096 created_entry
= logs
.CreatedLogEntry(self
.pakfire
, self
)
1097 entries
.append(created_entry
)
1100 entries
+= self
.get_comments(limit
=limit
)
1103 entries
+= self
.get_repo_moves(limit
=limit
)
1105 # Sort all entries in chronological order.
1109 entries
= entries
[:limit
]
1115 def get_watchers(self
):
1116 query
= self
.db
.query("SELECT DISTINCT users.id AS id FROM builds_watchers \
1117 JOIN users ON builds_watchers.user_id = users.id \
1118 WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \
1119 AND users.activated = 'Y' ORDER BY users.id", self
.id)
1121 return [users
.User(self
.pakfire
, u
.id) for u
in query
]
1123 def add_watcher(self
, user
):
1124 # Don't add a user twice.
1125 if user
in self
.get_watchers():
1128 self
.db
.execute("INSERT INTO builds_watchers(build_id, user_id) \
1129 VALUES(%s, %s)", self
.id, user
.id)
1132 def message_recipients(self
):
1135 for watcher
in self
.get_watchers():
1136 ret
.append("%s <%s>" % (watcher
.realname
, watcher
.email
))
1142 if self
._update
is None:
1143 update
= self
.db
.get("SELECT update_id AS id FROM updates_builds \
1144 WHERE build_id = %s", self
.id)
1147 self
._update
= updates
.Update(self
.pakfire
, update
.id)
1153 if self
._repo
is None:
1154 repo
= self
.db
.get("SELECT repo_id AS id FROM repositories_builds \
1155 WHERE build_id = %s", self
.id)
1158 self
._repo
= repository
.Repository(self
.pakfire
, repo
.id)
1162 def get_repo_moves(self
, limit
=None):
1163 query
= "SELECT * FROM repositories_history \
1164 WHERE build_id = %s ORDER BY time ASC"
1167 for action
in self
.db
.query(query
, self
.id):
1168 action
= logs
.RepositoryLogEntry(self
.pakfire
, action
)
1169 actions
.append(action
)
1181 def repo_time(self
):
1182 repo
= self
.db
.get("SELECT time_added FROM repositories_builds \
1183 WHERE build_id = %s", self
.id)
1186 return repo
.time_added
def get_auto_move(self):
    """Return True when this build moves between repositories automatically."""
    return self.data.auto_move == "Y"

def set_auto_move(self, state):
    """Enable or disable automatic repository moves for this build.

    state: any truthy/falsy value; stored as 'Y'/'N' in the database.
    """
    # The database column holds a single character flag.
    state = "Y" if state else "N"

    # BUG FIX: the original query had two placeholders but was only given
    # ``self.id``; the flag value must be passed as well.
    self.db.execute("UPDATE builds SET auto_move = %s WHERE id = %s",
        state, self.id)

    # Only refresh the cache when the row has already been fetched.
    if self._data:
        self._data["auto_move"] = state

auto_move = property(get_auto_move, set_auto_move)
1204 def can_move_forward(self
):
1208 # If there is no next repository, we cannot move anything.
1209 if not self
.repo
.next
:
1212 # If the needed amount of score is reached, we can move forward.
1213 if self
.score
>= self
.repo
.next
.score_needed
:
1216 # If the repository does not require a minimal time,
1217 # we can move forward immediately.
1218 if not self
.repo
.time_min
:
1221 query
= self
.db
.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
1222 WHERE build_id = %s", self
.id)
1223 duration
= query
.duration
1225 if duration
>= self
.repo
.time_min
:
def get_bug_ids(self):
    """Return the bug IDs of all bugs associated with this build."""
    rows = self.db.query("SELECT bug_id FROM builds_bugs \
        WHERE build_id = %s", self.id)

    return [row.bug_id for row in rows]
1238 def add_bug(self
, bug_id
, user
=None, log
=True):
1239 # Check if this bug is already in the list of bugs.
1240 if bug_id
in self
.get_bug_ids():
1243 self
.db
.execute("INSERT INTO builds_bugs(build_id, bug_id) \
1244 VALUES(%s, %s)", self
.id, bug_id
)
1248 self
.log("bug_added", user
=user
, bug_id
=bug_id
)
1250 def rem_bug(self
, bug_id
, user
=None, log
=True):
1251 self
.db
.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \
1252 bug_id = %s", self
.id, bug_id
)
1256 self
.log("bug_removed", user
=user
, bug_id
=bug_id
)
1258 def search_for_bugs(self
):
1262 pattern
= re
.compile(r
"(bug\s?|#)(\d+)")
1264 for txt
in (self
.commit
.subject
, self
.commit
.message
):
1265 for bug
in re
.finditer(pattern
, txt
):
1267 bugid
= int(bug
.group(2))
1271 # Check if a bug with the given ID exists in BZ.
1272 bug
= self
.pakfire
.bugzilla
.get_bug(bugid
)
1280 for bug_id
in self
.get_bug_ids():
1281 bug
= self
.pakfire
.bugzilla
.get_bug(bug_id
)
1289 def _update_bugs_helper(self
, repo
):
1291 This function takes a new status and generates messages that
1292 are appended to all bugs.
1295 kwargs
= BUG_MESSAGES
[repo
.type].copy()
1299 baseurl
= self
.pakfire
.settings
.get("baseurl", "")
1301 "build_url" : "%s/build/%s" % (baseurl
, self
.uuid
),
1302 "distro_name" : self
.distro
.name
,
1303 "package_name" : self
.name
,
1304 "repo_name" : repo
.name
,
1306 kwargs
["comment"] = kwargs
["comment"] % args
1308 self
.update_bugs(**kwargs
)
def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
    """Queue a status/resolution/comment update for a single bug.

    The update is written to a queue table; presumably a separate worker
    pushes it to the bug tracker — TODO confirm against the consumer.
    """
    self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
        VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment)
def update_bugs(self, status, resolution=None, comment=None):
    """Apply the same status update to every bug linked to this build."""
    for bug_id in self.get_bug_ids():
        self._update_bug(bug_id, status=status, resolution=resolution,
            comment=comment)
1320 class Jobs(base
.Object
):
1321 def _get_job(self
, query
, *args
):
1322 res
= self
.db
.get(query
, *args
)
1325 return Job(self
.backend
, res
.id, data
=res
)
1327 def _get_jobs(self
, query
, *args
):
1328 res
= self
.db
.query(query
, *args
)
1331 yield Job(self
.backend
, row
.id, data
=row
)
1333 def create(self
, build
, arch
, type="build"):
1334 job
= self
._get
_job
("INSERT INTO jobs(uuid, type, build_id, arch, time_created) \
1335 VALUES(%s, %s, %s, %s, NOW()) RETURNING *", "%s" % uuid
.uuid4(), type, build
.id, arch
)
1338 # Set cache for Build object.
1341 # Jobs are by default in state "new" and wait for being checked
1342 # for dependencies. Packages that do have no build dependencies
1343 # can directly be forwarded to "pending" state.
1344 if not job
.pkg
.requires
:
1345 job
.state
= "pending"
def get_by_id(self, id, data=None):
    """Return a Job wrapper for the given database id.

    ``data`` may carry an already-fetched database row.
    """
    job = Job(self.pakfire, id, data)
    return job
1352 def get_by_uuid(self
, uuid
):
1353 job
= self
.db
.get("SELECT id FROM jobs WHERE uuid = %s", uuid
)
1356 return self
.get_by_id(job
.id)
1358 def get_by_build(self
, build_id
, build
=None, type=None):
1360 Get all jobs in the specifies build.
1362 query
= "SELECT * FROM jobs WHERE build_id = %s"
1366 query
+= " AND type = %s"
1369 # Get IDs of all builds in this group.
1371 for job
in self
.db
.query(query
, *args
):
1372 job
= Job(self
.pakfire
, job
.id, job
)
1374 # If the Build object was set, we set it so it won't be retrieved
1375 # from the database again.
1381 # Return sorted list of jobs.
1384 def get_active(self
, host_id
=None, builder
=None, states
=None):
1386 host_id
= builder
.id
1389 states
= ["dispatching", "running", "uploading"]
1391 query
= "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states
))
1395 query
+= " AND builder_id = %s" % host_id
1397 query
+= " ORDER BY \
1399 WHEN jobs.state = 'running' THEN 0 \
1400 WHEN jobs.state = 'uploading' THEN 1 \
1401 WHEN jobs.state = 'dispatching' THEN 2 \
1402 WHEN jobs.state = 'pending' THEN 3 \
1403 WHEN jobs.state = 'new' THEN 4 \
1404 END, time_started ASC"
1406 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
1408 def get_latest(self
, arch
=None, builder
=None, limit
=None, age
=None, date
=None):
1409 query
= "SELECT * FROM jobs"
1412 where
= ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]
1415 where
.append("arch = %s")
1419 where
.append("builder_id = %s")
1420 args
.append(builder
.id)
1424 year
, month
, day
= date
.split("-", 2)
1425 date
= datetime
.date(int(year
), int(month
), int(day
))
1429 where
.append("(DATE(time_created) = %s OR \
1430 DATE(time_started) = %s OR DATE(time_finished) = %s)")
1431 args
+= (date
, date
, date
)
1434 where
.append("time_finished >= NOW() - '%s'::interval" % age
)
1437 query
+= " WHERE %s" % " AND ".join(where
)
1439 query
+= " ORDER BY time_finished DESC"
1442 query
+= " LIMIT %s"
1445 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
def get_average_build_time(self):
    """
    Returns the average build time of all finished build jobs from the
    last three months.
    """
    row = self.db.get("SELECT AVG(time_finished - time_started) as average \
        FROM jobs WHERE type = 'build' AND state = 'finished' AND \
        time_finished >= NOW() - '3 months'::interval")

    return row.average
1459 def count(self
, *states
):
1460 query
= "SELECT COUNT(*) AS count FROM jobs"
1464 query
+= " WHERE state IN %s"
1467 jobs
= self
.db
.get(query
, *args
)
1471 def restart_failed(self
, max_tries
=9):
1472 jobs
= self
._get
_jobs
("SELECT jobs.* FROM jobs \
1473 JOIN builds ON builds.id = jobs.build_id \
1475 jobs.type = 'build' AND \
1476 jobs.state = 'failed' AND \
1477 jobs.tries <= %s AND \
1478 NOT builds.state = 'broken' AND \
1479 jobs.time_finished < NOW() - '72 hours'::interval \
1482 WHEN jobs.type = 'build' THEN 0 \
1483 WHEN jobs.type = 'test' THEN 1 \
1485 builds.priority DESC, jobs.time_created ASC",
1490 job
.set_state("new", log
=False)
1493 class Job(base
.DataObject
):
1497 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.name
)
def __eq__(self, other):
    # Two Job objects compare equal when they wrap the same database row.
    # For foreign types this implicitly returns None (falsy), matching
    # the original behavior.
    same_kind = isinstance(other, self.__class__)
    if same_kind:
        return self.id == other.id
1503 def __lt__(self
, other
):
1504 if isinstance(other
, self
.__class
__):
1505 if (self
.type, other
.type) == ("build", "test"):
1508 if self
.build
== other
.build
:
1509 return arches
.priority(self
.arch
) < arches
.priority(other
.arch
)
1511 return self
.time_created
< other
.time_created
1514 packages
= self
.backend
.packages
._get
_packages
("SELECT packages.* FROM jobs_packages \
1515 LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
1516 WHERE jobs_packages.job_id = %s ORDER BY packages.name", self
.id)
1518 return iter(packages
)
1520 def __nonzero__(self
):
1524 res
= self
.db
.get("SELECT COUNT(*) AS len FROM jobs_packages \
1525 WHERE job_id = %s", self
.id)
1531 return self
.build
.distro
1534 self
.__delete
_buildroots
()
1535 self
.__delete
_history
()
1536 self
.__delete
_packages
()
1537 self
.__delete
_logfiles
()
1539 # Delete the job itself.
1540 self
.db
.execute("DELETE FROM jobs WHERE id = %s", self
.id)
def __delete_buildroots(self):
    """
    Removes all buildroots.
    """
    self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)
def __delete_history(self):
    """
    Removes all references in the history to this build job.
    """
    self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id)
def __delete_packages(self):
    """
    Deletes all uploaded files from the job.
    """
    for pkg in self.packages:
        # NOTE(review): loop body (original ~1559, presumably
        # "pkg.delete()") was lost in extraction — confirm upstream.

    self.db.execute("DELETE FROM jobs_packages WHERE job_id = %s", self.id)
def __delete_logfiles(self):
    """
    Queue every log file of this job for removal from disk.
    """
    for lf in self.logfiles:
        self.db.execute("INSERT INTO queue_delete(path) VALUES(%s)", lf.path)
def reset(self, user=None):
    """
    Wipe everything attached to this job (buildroots, packages,
    history, log files) and record the reset in the job log.
    """
    # Order matters: drop dependent rows first, then log the reset.
    for wipe in (self.__delete_buildroots, self.__delete_packages,
            self.__delete_history, self.__delete_logfiles):
        wipe()

    self.log("reset", user=user)
def log(self, action, user=None, state=None, builder=None, test_job=None):
    """
    Writes an entry into the jobs_history table.
    """
    # NOTE(review): the initialisation of user_id/builder_id/test_job_id
    # and their guarding "if" statements (original ~1579-1588) were
    # lost in extraction; user_id is referenced below but its
    # assignment is not visible here.
        builder_id = builder.id
        test_job_id = test_job.id

    self.db.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
        time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
        self.id, action, state, user_id, builder_id, test_job_id)

def get_log(self, limit=None, offset=None, user=None):
    """
    Returns history entries for this job, newest first, optionally
    filtered by user and paginated via limit/offset.
    """
    query = "SELECT * FROM jobs_history"

    conditions = ["job_id = %s",]
    # NOTE(review): the args initialisation and the "if user:" guard
    # (original ~1599-1601) were lost in extraction.
        conditions.append("user_id = %s")
        args.append(user.id)

    query += " WHERE %s" % " AND ".join(conditions)

    query += " ORDER BY time DESC"

    # NOTE(review): the "if limit:" / "if offset:" guards (original
    # ~1610-1614) were lost in extraction. Also: "LIMIT %s,%s" is MySQL
    # syntax while the "::interval" cast used elsewhere in this file is
    # PostgreSQL — one of the two cannot be right; confirm the target
    # database.
        query += " LIMIT %s,%s"
        args += [offset, limit,]
        query += " LIMIT %s"

    # NOTE(review): "entries = []" (original ~1617) was lost in
    # extraction, as was the final "return entries" (~1623).
    for entry in self.db.query(query, *args):
        entry = logs.JobLogEntry(self.pakfire, entry)
        entries.append(entry)
# NOTE(review): the @property/def headers for the accessors in this
# region (uuid, type, build_id, build, pkg, name, size, rank — original
# ~1625-1673) were lost in extraction; only their bodies remain.

    # uuid: the job's unique identifier column.
    return self.data.uuid

    # type: 'build' or 'test'.
    return self.data.type

    # build_id: foreign key of the build this job belongs to.
    return self.data.build_id

    # build: resolved Build object for build_id.
    return self.pakfire.builds.get_by_id(self.build_id)

def related_jobs(self):
    # All other jobs that belong to the same build.
    # NOTE(review): the accumulator setup and the loop body (original
    # ~1643-1653) were lost in extraction.
    for job in self.build.jobs:

    # pkg: the source package of the parent build.
    return self.build.pkg

    # name: "<package>-<version>.<arch>".
    return "%s-%s.%s" % (self.pkg.name, self.pkg.friendly_version, self.arch)

    # size: total size of all uploaded packages.
    return sum((p.size for p in self.packages))

    """
    Returns the rank in the build queue
    """
    # Only pending jobs have a queue rank.
    if not self.state == "pending":

    # NOTE(review): the early-return body above and the final
    # "return res.rank" (original ~1671, ~1675) were lost in
    # extraction.
    res = self.db.get("SELECT rank FROM jobs_queue WHERE job_id = %s", self.id)
def is_running(self):
    """
    Returns True if job is in a running state.
    """
    return self.state in ("pending", "dispatching", "running", "uploading")
def get_state(self):
    # Current state string as stored in the database row.
    return self.data.state

def set_state(self, state, user=None, log=True):
    """
    Moves the job into a new state: persists it, logs the transition,
    maintains timestamps/try counters and sends notification mails.
    """
    # Nothing to do if the state remains.
    if not self.state == state:
        self.db.execute("UPDATE jobs SET state = %s WHERE id = %s", state, self.id)

        # Log the transition unless suppressed or the job was reset to
        # "new" (resets are logged separately).
        if log and not state == "new":
            self.log("state_change", state=state, user=user)

        # Keep the cached row in sync.
        # NOTE(review): a guard around this cache update (original
        # ~1695-1697) may have been lost in extraction.
        self._data["state"] = state

    # Always clear the message when the status is changed.
    self.update_message(None)

    # Update some more informations.
    if state == "dispatching":
        # A new run starts now; clear any previous finish time.
        self.db.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
            WHERE id = %s", self.id)

    elif state == "pending":
        # Another try is queued; bump the counter and reset timestamps.
        self.db.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
            time_finished = NULL WHERE id = %s", self.id)

    elif state in ("aborted", "dependency_error", "finished", "failed"):
        # Set finish time and reset builder..
        self.db.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self.id)

        # Send messages to the user.
        if state == "finished":
            self.send_finished_message()

        elif state == "failed":
            # Remove all package files if a job is set to failed state.
            self.__delete_packages()

            self.send_failed_message()

    # Automatically update the state of the build (not on test builds).
    if self.type == "build":
        self.build.auto_update_state()

state = property(get_state, set_state)
# NOTE(review): the property header for "message" (original ~1733-1734)
# was lost in extraction.
    return self.data.message

def update_message(self, msg):
    # Persist a new status message for this job.
    # NOTE(review): the execute() argument line (original ~1739,
    # presumably "msg, self.id)") was lost in extraction.
    self.db.execute("UPDATE jobs SET message = %s WHERE id = %s",

    # Keep the cached row in sync.
    self._data["message"] = msg
def get_builder(self):
    """
    Look up the builder this job has been assigned to, if any.
    """
    builder_id = self.data.builder_id
    if builder_id:
        return self.backend.builders.get_by_id(builder_id)
def set_builder(self, builder, user=None):
    # Assign this job to a build machine and keep caches in sync.
    self.db.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
        builder.id, self.id)

    # NOTE(review): a guard around this cache update (original
    # ~1751-1753) may have been lost in extraction.
    self._data["builder_id"] = builder.id

    self._builder = builder

    # NOTE(review): an "if user:" guard before the log call (original
    # ~1757-1759) appears to have been lost in extraction.
    self.log("builder_assigned", builder=builder, user=user)

builder = lazy_property(get_builder, set_builder)

# NOTE(review): the property header for "arch" was lost in extraction.
    return self.data.arch

# NOTE(review): the property header for "duration" (original
# ~1768-1769) was lost in extraction. Returns the runtime in seconds;
# for still-running jobs, the time elapsed so far.
    if not self.time_started:
        # NOTE(review): early-return body (original ~1771) lost.

    if self.time_finished:
        delta = self.time_finished - self.time_started
        # NOTE(review): the "else:" introducing the next line was lost
        # in extraction.
        delta = datetime.datetime.utcnow() - self.time_started

    return delta.total_seconds()

def time_created(self):
    # NOTE(review): likely decorated with @property upstream (decorator
    # lines lost in extraction) — confirm before calling as attribute.
    return self.data.time_created

def time_started(self):
    # See NOTE above regarding a possibly lost @property decorator.
    return self.data.time_started

def time_finished(self):
    # See NOTE above regarding a possibly lost @property decorator.
    return self.data.time_finished

def expected_runtime(self):
    """
    Returns the estimated time and stddev, this job takes to finish.
    """
    # Get the average build time.
    build_times = self.pakfire.builds.get_build_times_by_arch(self.arch,
    # NOTE(review): the remaining call arguments (original ~1799-1800)
    # were lost in extraction.

    # If there is no statistical data, we cannot estimate anything.
    # NOTE(review): the guard and its body (original ~1802-1804) were
    # lost in extraction.
    return build_times.average, build_times.stddev

# NOTE(review): the header of the ETA accessor (original ~1807-1808)
# was lost in extraction; it subtracts elapsed time from the estimate.
    expected_runtime, stddev = self.expected_runtime

    if expected_runtime:
        return expected_runtime - int(self.duration), stddev

# NOTE(review): the property header for "tries" was lost in extraction.
    return self.data.tries

def get_pkg_by_uuid(self, uuid):
    # Look up one of this job's packages by its UUID.
    pkg = self.backend.packages._get_package("SELECT packages.id FROM packages \
        JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
        WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
    # NOTE(review): the query arguments and the tail of this method
    # (original ~1822-1828) were lost in extraction.

# NOTE(review): the header of the "logfiles" accessor and its
# accumulator setup (original ~1830-1831) were lost in extraction, as
# was the final return.
    for log in self.db.query("SELECT id FROM logfiles WHERE job_id = %s", self.id):
        log = logs.LogFile(self.pakfire, log.id)

        logfiles.append(log)
def add_file(self, filename):
    """
    Add the specified file to this job.

    The file is copied to the right directory by this function.
    """
    assert os.path.exists(filename)

    if filename.endswith(".log"):
        self._add_file_log(filename)

    elif filename.endswith(".%s" % PACKAGE_EXTENSION):
        # It is not allowed to upload packages on test builds.
        if self.type == "test":
            # NOTE(review): the guard body (original ~1854, presumably
            # "return") was lost in extraction; without it, packages
            # would wrongly be attached to test jobs — confirm
            # upstream.

        self._add_file_package(filename)
def _add_file_log(self, filename):
    """
    Attach a log file to this job.
    """
    target_dirname = os.path.join(self.build.path, "logs")

    if self.type == "test":
        # NOTE(review): the counter setup and the retry loop that picks
        # a free filename (original ~1865-1866, ~1869-1873), including
        # the definition of "i" used below, were lost in extraction.
        target_filename = os.path.join(target_dirname,
            "test.%s.%s.%s.log" % (self.arch, i, self.tries))

        if os.path.exists(target_filename):
        # NOTE(review): the "else:" branch header (original ~1874) was
        # lost; regular build logs use the simpler name below.
        target_filename = os.path.join(target_dirname,
            "build.%s.%s.log" % (self.arch, self.tries))

    # Make sure the target directory exists.
    if not os.path.exists(target_dirname):
        os.makedirs(target_dirname)

    # Calculate a SHA512 hash from that file.
    # NOTE(review): the read loop and file close (original ~1885-1892)
    # were lost in extraction; as visible, the handle is never closed.
    f = open(filename, "rb")
    h = hashlib.sha512()
        buf = f.read(BUFFER_SIZE)

    # Copy the file to the final location.
    shutil.copy2(filename, target_filename)

    # Create an entry in the database.
    self.db.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
        VALUES(%s, %s, %s, %s)", self.id, os.path.relpath(target_filename, PACKAGES_DIR),
        os.path.getsize(target_filename), h.hexdigest())

def _add_file_package(self, filename):
    # Open package (creates entry in the database).
    pkg = packages.Package.open(self.pakfire, filename)

    # Move package to the build directory.
    pkg.move(os.path.join(self.build.path, self.arch))

    # Attach the package to this job.
    # NOTE(review): the execute() argument line (original ~1910,
    # presumably "self.id, pkg.id)") was lost in extraction.
    self.db.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
def get_aborted_state(self):
    # State the job was in before it got aborted (stored column).
    return self.data.aborted_state

def set_aborted_state(self, state):
    # Persist the pre-abort state via the generic attribute setter.
    self._set_attribute("aborted_state", state)

aborted_state = property(get_aborted_state, set_aborted_state)
def message_recipients(self):
    # Collects everybody who should receive mail about this job.
    # NOTE(review): a likely @property decorator and the accumulator
    # setup (original ~1920-1923, presumably "l = []") were lost in
    # extraction, as was the final return (~1943).

    # Add all people watching the build.
    l += self.build.message_recipients

    # Add the package maintainer on release builds.
    if self.build.type == "release":
        maint = self.pkg.maintainer

        if isinstance(maint, users.User):
            l.append("%s <%s>" % (maint.realname, maint.email))

        # XXX add committer and commit author.

    # Add the owner of the scratch build on scratch builds.
    elif self.build.type == "scratch" and self.build.user:
        l.append("%s <%s>" % \
            (self.build.user.realname, self.build.user.email))

def save_buildroot(self, pkgs):
    # Persist the buildroot ((name, uuid) pairs) used for the current
    # try of this job.
    # NOTE(review): "rows = []" (original ~1946-1947) was lost in
    # extraction.
    for pkg_name, pkg_uuid in pkgs:
        rows.append((self.id, self.tries, pkg_uuid, pkg_name))

    # Cleanup old stuff first (for rebuilding packages).
    self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s",
        self.id, self.tries)

    self.db.executemany("INSERT INTO \
        jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \
        VALUES(%s, %s, %s, %s)", rows)

def has_buildroot(self, tries=None):
    # True when a buildroot was recorded for the given try.
    # NOTE(review): the defaulting of "tries", the query arguments and
    # the return (original ~1960-1967) were lost in extraction.
    res = self.db.get("SELECT COUNT(*) AS num FROM jobs_buildroots \
        WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s",

def get_buildroot(self, tries=None):
    # Returns the recorded buildroot as (name, uuid, package) tuples.
    # NOTE(review): the defaulting of "tries" (original ~1973-1975),
    # the accumulator/loop header defining "row" (~1979-1981) and the
    # final return were lost in extraction.
    rows = self.db.query("SELECT * FROM jobs_buildroots \
        WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
        ORDER BY pkg_name", self.id, tries)

        # Search for this package in the packages table.
        pkg = self.pakfire.packages.get_by_uuid(row.pkg_uuid)
        pkgs.append((row.pkg_name, row.pkg_uuid, pkg))
def send_finished_message(self):
    # Send no finished mails for test jobs.
    if self.type == "test":
        # NOTE(review): guard body (original ~1991, presumably
        # "return") was lost in extraction.

    logging.debug("Sending finished message for job %s to %s" % \
        (self.name, ", ".join(self.message_recipients)))

    # NOTE(review): the "info = {" / "}" delimiters of the template
    # mapping (original ~1996, ~2000) were lost in extraction.
        "build_name" : self.name,
        "build_host" : self.builder.name,
        "build_uuid" : self.uuid,

    self.pakfire.messages.send_to_all(self.message_recipients,
        MSG_BUILD_FINISHED_SUBJECT, MSG_BUILD_FINISHED, info)

def send_failed_message(self):
    logging.debug("Sending failed message for job %s to %s" % \
        (self.name, ", ".join(self.message_recipients)))

    # NOTE(review): the fallback initialisation of build_host and its
    # guard (original ~2009-2010) were lost in extraction; unlike the
    # finished mail, the builder may be unset here.
        build_host = self.builder.name

    # NOTE(review): "info = {" / "}" delimiters lost (original ~2013,
    # ~2017).
        "build_name" : self.name,
        "build_host" : build_host,
        "build_uuid" : self.uuid,

    self.pakfire.messages.send_to_all(self.message_recipients,
        MSG_BUILD_FAILED_SUBJECT, MSG_BUILD_FAILED, info)

def set_start_time(self, start_time):
    # Delay the job: it must not start before NOW() + start_time.
    if start_time is None:
        # NOTE(review): guard body (original ~2024, presumably
        # "return") was lost in extraction.

    # NOTE(review): "UPDATE ... LIMIT 1" is MySQL-only syntax, while
    # the "::interval" cast used elsewhere in this file is PostgreSQL —
    # one of the two cannot be right; confirm the target database.
    self.db.execute("UPDATE jobs SET start_not_before = NOW() + %s \
        WHERE id = %s LIMIT 1", start_time, self.id)
def schedule(self, type, start_time=None, user=None):
    """
    Re-schedule this job: either a rebuild of the same job, or a new
    test job derived from it.
    """
    assert type in ("rebuild", "test")

    if type == "rebuild":
        # Finished jobs cannot be rebuilt.
        if self.state == "finished":
            # NOTE(review): guard body (original ~2034, presumably
            # "return") was lost in extraction.

        self.set_state("new", user=user, log=False)
        self.set_start_time(start_time)

        # Log the rescheduling.
        self.log("schedule_rebuild", user=user)

    elif type == "test":
        # Tests can only be derived from finished jobs.
        if not self.state == "finished":
            # NOTE(review): guard body (original ~2044) was lost in
            # extraction.

        # Create a new job with same build and arch.
        job = self.create(self.pakfire, self.build, self.arch, type="test")
        job.set_start_time(start_time)

        # NOTE(review): lines between here and the log call (original
        # ~2049-2050) and a final "return job" (~2053) were lost in
        # extraction.
        self.log("schedule_test_job", test_job=job, user=user)
def schedule_test(self, start_not_before=None, user=None):
    """
    Convenience wrapper: schedule a test job derived from this job.
    """
    return self.schedule(
        "test", start_time=start_not_before, user=user)
def schedule_rebuild(self, start_not_before=None, user=None):
    """
    Convenience wrapper: schedule a rebuild of this job.
    """
    return self.schedule(
        "rebuild", start_time=start_not_before, user=user)
def get_build_repos(self):
    """
    Returns a list of all repositories that should be used when
    building this job.
    """
    repo_ids = self.db.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",
    # NOTE(review): the query argument and the "if not repo_ids:" guard
    # (original ~2069-2071) were lost in extraction; with no explicit
    # repos, the distribution defaults are used.
        return self.distro.get_build_repos()

    # NOTE(review): "repos = []" and the append inside the loop
    # (original ~2074, ~2077) were lost in extraction.
    for repo in self.distro.repositories:
        if repo.id in [r.id for r in repo_ids]:

    # Fall back to the distribution's default build repositories.
    return repos or self.distro.get_build_repos()

def get_repo_config(self):
    """
    Get repository configuration file that is sent to the builder.
    """
    # NOTE(review): "confs = []" (original ~2085-2086) was lost in
    # extraction.
    for repo in self.get_build_repos():
        confs.append(repo.get_conf())

    return "\n\n".join(confs)

def get_config(self):
    """
    Get configuration file that is sent to the builder.
    """
    # NOTE(review): "confs = []" (original ~2096-2097) was lost in
    # extraction.
    # Add the distribution configuration.
    confs.append(self.distro.get_config())

    # Then add all repositories for this build.
    confs.append(self.get_repo_config())

    return "\n\n".join(confs)
2106 def resolvdep(self
):
2107 config
= pakfire
.config
.Config(files
=["general.conf"])
2108 config
.parse(self
.get_config())
2110 # The filename of the source file.
2111 filename
= os
.path
.join(PACKAGES_DIR
, self
.build
.pkg
.path
)
2112 assert os
.path
.exists(filename
), filename
2114 # Create a new pakfire instance with the configuration for
2116 p
= pakfire
.PakfireServer(config
=config
, arch
=self
.arch
)
2118 # Try to solve the build dependencies.
2120 solver
= p
.resolvdep(filename
)
2122 # Catch dependency errors and log the problem string.
2123 except DependencyError
, e
:
2124 self
.state
= "dependency_error"
2125 self
.update_message(e
)
2128 # If the build dependencies can be resolved, we set the build in
2130 if solver
.status
is True:
2131 if self
.state
in ("failed",):
2134 self
.state
= "pending"