13 import pakfire
.packages
17 from . import builders
19 from . import packages
20 from . import repository
24 log
= logging
.getLogger("builds")
27 from .constants
import *
28 from .decorators
import *
class Builds(base.Object):
	"""
	Collection object that looks Build objects up in the database.
	"""

	def _get_build(self, query, *args):
		"""
		Run *query* against the database and wrap the single resulting
		row in a Build object.

		Returns None when the query matched nothing.

		NOTE(review): the extraction this chunk came from dropped lines
		here; the original almost certainly guarded the result before
		dereferencing it. The guard is restored so a miss returns None
		instead of raising AttributeError on None.id.
		"""
		res = self.db.get(query, *args)

		if res:
			return Build(self.backend, res.id, data=res)
37 def _get_builds(self
, query
, *args
):
38 res
= self
.db
.query(query
, *args
)
41 yield Build(self
.backend
, row
.id, data
=row
)
43 def get_by_id(self
, id, data
=None):
44 return Build(self
.pakfire
, id, data
=data
)
46 def get_by_uuid(self
, uuid
):
47 build
= self
.db
.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid
)
50 return self
.get_by_id(build
.id)
52 def get_all(self
, limit
=50):
53 query
= "SELECT * FROM builds ORDER BY time_created DESC"
56 query
+= " LIMIT %d" % limit
58 return [self
.get_by_id(b
.id, b
) for b
in self
.db
.query(query
)]
60 def get_by_user(self
, user
, type=None, public
=None):
64 if not type or type == "scratch":
65 # On scratch builds the user id equals the owner id.
66 conditions
.append("(builds.type = 'scratch' AND owner_id = %s)")
69 elif not type or type == "release":
73 conditions
.append("public = 'Y'")
75 conditions
.append("public = 'N'")
77 query
= "SELECT builds.* AS id FROM builds \
78 JOIN packages ON builds.pkg_id = packages.id"
81 query
+= " WHERE %s" % " AND ".join(conditions
)
83 query
+= " ORDER BY builds.time_created DESC"
86 for build
in self
.db
.query(query
, *args
):
87 build
= Build(self
.pakfire
, build
.id, build
)
92 def get_by_name(self
, name
, type=None, public
=None, user
=None, limit
=None, offset
=None):
99 conditions
.append("builds.type = %s")
104 or_conditions
.append("public = 'Y'")
105 elif public
is False:
106 or_conditions
.append("public = 'N'")
108 if user
and not user
.is_admin():
109 or_conditions
.append("builds.owner_id = %s")
112 query
= "SELECT builds.* AS id FROM builds \
113 JOIN packages ON builds.pkg_id = packages.id"
116 conditions
.append(" OR ".join(or_conditions
))
119 query
+= " WHERE %s" % " AND ".join(conditions
)
121 if type == "release":
122 query
+= " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"
123 elif type == "scratch":
124 query
+= " ORDER BY time_created DESC"
128 query
+= " LIMIT %s,%s"
129 args
.extend([offset
, limit
])
134 return [Build(self
.pakfire
, b
.id, b
) for b
in self
.db
.query(query
, *args
)]
136 def get_latest_by_name(self
, name
, type=None, public
=None):
138 SELECT * FROM builds \
139 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
140 WHERE builds_latest.package_name = %s"
144 query
+= " AND builds_latest.build_type = %s"
148 query
+= " AND builds.public = %s"
150 elif public
is False:
151 query
+= " AND builds.public = %s"
154 # Get the last one only.
155 # Prefer release builds over scratch builds.
158 CASE builds.type WHEN 'release' THEN 0 ELSE 1 END, \
159 builds.time_created DESC \
162 res
= self
.db
.get(query
, *args
)
165 return Build(self
.pakfire
, res
.id, res
)
167 def get_active_builds(self
, name
, public
=None):
169 SELECT * FROM builds \
170 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
171 WHERE builds_latest.package_name = %s AND builds.type = %s"
172 args
= [name
, "release"]
175 query
+= " AND builds.public = %s"
177 elif public
is False:
178 query
+= " AND builds.public = %s"
182 for row
in self
.db
.query(query
, *args
):
183 b
= Build(self
.pakfire
, row
.id, row
)
186 # Sort the result. Lastest build first.
187 builds
.sort(reverse
=True)
192 builds
= self
.db
.get("SELECT COUNT(*) AS count FROM builds")
196 def get_obsolete(self
, repo
=None):
198 Get all obsoleted builds.
200 If repo is True: which are in any repository.
201 If repo is some Repository object: which are in this repository.
206 query
= "SELECT id FROM builds WHERE state = 'obsolete'"
209 query
= "SELECT build_id AS id FROM repositories_builds \
210 JOIN builds ON builds.id = repositories_builds.build_id \
211 WHERE builds.state = 'obsolete'"
213 if repo
and not repo
is True:
214 query
+= " AND repositories_builds.repo_id = %s"
217 res
= self
.db
.query(query
, *args
)
221 build
= Build(self
.pakfire
, build
.id)
226 def create(self
, pkg
, type="release", owner
=None, distro
=None):
227 assert type in ("release", "scratch", "test")
228 assert distro
, "You need to specify the distribution of this build."
230 # Check if scratch build has an owner.
231 if type == "scratch" and not owner
:
232 raise Exception, "Scratch builds require an owner"
234 # Set the default priority of this build.
235 if type == "release":
238 elif type == "scratch":
244 # Create build in database
245 build
= self
._get
_build
("INSERT INTO builds(uuid, pkg_id, type, distro_id, priority) \
246 VALUES(%s, %s, %s, %s, %s) RETURNING *", "%s" % uuid
.uuid4(), pkg
.id, type, distro
.id, priority
)
248 # Set the owner of this build
252 # Log that the build has been created.
253 build
.log("created", user
=owner
)
255 # Create directory where the files live
256 if not os
.path
.exists(build
.path
):
257 os
.makedirs(build
.path
)
259 # Move package file to the directory of the build.
260 build
.pkg
.move(os
.path
.join(build
.path
, "src"))
262 # Generate an update id.
263 build
.generate_update_id()
265 # Obsolete all other builds with the same name to track updates.
266 build
.obsolete_others()
268 # Search for possible bug IDs in the commit message.
269 build
.search_for_bugs()
273 def create_from_source_package(self
, filename
, distro
, commit
=None, type="release",
274 arches
=None, check_for_duplicates
=True, owner
=None):
277 # Open the package file to read some basic information.
278 pkg
= pakfire
.packages
.open(None, None, filename
)
280 if check_for_duplicates
:
281 if distro
.has_package(pkg
.name
, pkg
.epoch
, pkg
.version
, pkg
.release
):
282 log
.warning("Duplicate package detected: %s. Skipping." % pkg
)
285 # Open the package and add it to the database
286 pkg
= self
.backend
.packages
.create(filename
)
288 # Associate the package to the processed commit
292 # Create a new build object from the package
293 build
= self
.create(pkg
, type=type, owner
=owner
, distro
=distro
)
295 # Create all automatic jobs
296 build
.create_autojobs(arches
=arches
)
300 def get_changelog(self
, name
, public
=None, limit
=5, offset
=0):
301 query
= "SELECT builds.* FROM builds \
302 JOIN packages ON builds.pkg_id = packages.id \
307 args
= ["release", name
,]
310 query
+= " AND builds.public = %s"
312 elif public
== False:
313 query
+= " AND builds.public = %s"
316 query
+= " ORDER BY builds.time_created DESC"
320 query
+= " LIMIT %s,%s"
321 args
+= [offset
, limit
]
327 for b
in self
.db
.query(query
, *args
):
328 b
= Build(self
.pakfire
, b
.id, b
)
331 builds
.sort(reverse
=True)
335 def get_comments(self
, limit
=10, offset
=None, user
=None):
336 query
= "SELECT * FROM builds_comments \
337 JOIN users ON builds_comments.user_id = users.id"
342 wheres
.append("users.id = %s")
346 query
+= " WHERE %s" % " AND ".join(wheres
)
349 query
+= " ORDER BY time_created DESC"
354 query
+= " LIMIT %s,%s"
362 for comment
in self
.db
.query(query
, *args
):
363 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
364 comments
.append(comment
)
368 def get_build_times_summary(self
, name
=None, job_type
=None, arch
=None):
371 builds_times.arch AS arch, \
372 MAX(duration) AS maximum, \
373 MIN(duration) AS minimum, \
374 AVG(duration) AS average, \
375 SUM(duration) AS sum, \
376 STDDEV_POP(duration) AS stddev \
378 LEFT JOIN builds ON builds_times.build_id = builds.id \
379 LEFT JOIN packages ON builds.pkg_id = packages.id"
386 conditions
.append("packages.name = %s")
389 # Filter by job types.
391 conditions
.append("builds_times.job_type = %s")
392 args
.append(job_type
)
396 conditions
.append("builds_times.arch = %s")
401 query
+= " WHERE %s" % " AND ".join(conditions
)
403 # Grouping and sorting.
404 query
+= " GROUP BY arch ORDER BY arch DESC"
406 return self
.db
.query(query
, *args
)
408 def get_build_times_by_arch(self
, arch
, **kwargs
):
413 build_times
= self
.get_build_times_summary(**kwargs
)
415 return build_times
[0]
class Build(base.Object):
	# Wraps one row of the "builds" table. The methods below manage the
	# build's jobs, comments, bugs, watchers and repository state.
421 def __init__(self
, pakfire
, id, data
=None):
422 base
.Object
.__init
__(self
, pakfire
)
430 self
._jobs
_test
= None
431 self
._depends
_on
= None
439 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.pkg
)
441 def __cmp__(self
, other
):
445 return cmp(self
.pkg
, other
.pkg
)
448 jobs
= self
.backend
.jobs
._get
_jobs
("SELECT * FROM jobs \
449 WHERE build_id = %s", self
.id)
451 return iter(sorted(jobs
))
455 Deletes this build including all jobs, packages and the source
458 # If the build is in a repository, we need to remove it.
460 self
.repo
.rem_build(self
)
462 for job
in self
.jobs
+ self
.test_jobs
:
468 # Delete everything related to this build.
470 self
.__delete
_comments
()
471 self
.__delete
_history
()
472 self
.__delete
_watchers
()
474 # Delete the build itself.
475 self
.db
.execute("DELETE FROM builds WHERE id = %s", self
.id)
	def __delete_bugs(self):
		"""
		Delete all associated bugs.
		"""
		self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s", self.id)
	def __delete_comments(self):
		# Remove every comment attached to this build.
		self.db.execute("DELETE FROM builds_comments WHERE build_id = %s", self.id)
	def __delete_history(self):
		"""
		Delete the repository history.
		"""
		self.db.execute("DELETE FROM repositories_history WHERE build_id = %s", self.id)
	def __delete_watchers(self):
		# Remove every watcher subscription for this build.
		self.db.execute("DELETE FROM builds_watchers WHERE build_id = %s", self.id)
503 Resets the whole build so it can start again (as it has never
506 for job
in self
.jobs
:
509 #self.__delete_bugs()
510 self
.__delete
_comments
()
511 self
.__delete
_history
()
512 self
.__delete
_watchers
()
514 self
.state
= "building"
521 Lazy fetching of data for this object.
523 if self
._data
is None:
524 self
._data
= self
.db
.get("SELECT * FROM builds WHERE id = %s", self
.id)
532 A set of information that is sent to the XMLRPC client.
534 return { "uuid" : self
.uuid
}
536 def log(self
, action
, user
=None, bug_id
=None):
541 self
.db
.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
542 VALUES(%s, %s, %s, NOW(), %s)", self
.id, action
, user_id
, bug_id
)
547 The UUID of this build.
549 return self
.data
.uuid
554 Get package that is to be built in the build.
556 if self
._pkg
is None:
557 self
._pkg
= packages
.Package(self
.pakfire
, self
.data
.pkg_id
)
563 return "%s-%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
)
568 The type of this build.
570 return self
.data
.type
574 The owner of this build.
576 if self
.data
.owner_id
:
577 return self
.backend
.users
.get_by_id(self
.data
.owner_id
)
579 def set_owner(self
, owner
):
581 self
._set
_attribute
("owner_id", owner
.id)
583 self
._set
_attribute
("owner_id", None)
585 owner
= lazy_property(get_owner
, set_owner
)
589 return self
.data
.distro_id
593 if self
._distro
is None:
594 self
._distro
= self
.pakfire
.distros
.get_by_id(self
.distro_id
)
601 if self
.type == "scratch":
604 def get_depends_on(self
):
605 if self
.data
.depends_on
and self
._depends
_on
is None:
606 self
._depends
_on
= Build(self
.pakfire
, self
.data
.depends_on
)
608 return self
._depends
_on
610 def set_depends_on(self
, build
):
611 self
.db
.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
615 self
._depends
_on
= build
616 self
._data
["depends_on"] = build
.id
618 depends_on
= property(get_depends_on
, set_depends_on
)
622 return self
.data
.time_created
626 return self
.created
.date()
631 Is this build public?
633 return self
.data
.public
== "Y"
638 Returns the size on disk of this build.
642 # Add the source package.
647 s
+= sum((j
.size
for j
in self
.jobs
))
653 # # Cache all states.
654 # states = [j.state for j in self.jobs]
656 # target_state = "unknown"
658 # # If at least one job has failed, the whole build has failed.
659 # if "failed" in states:
660 # target_state = "failed"
662 # # It at least one of the jobs is still running, the whole
663 # # build is in running state.
664 # elif "running" in states:
665 # target_state = "running"
667 # # If all jobs are in the finished state, we turn into finished
669 # elif all([s == "finished" for s in states]):
670 # target_state = "finished"
672 # return target_state
674 def auto_update_state(self
):
676 Check if the state of this build can be updated and perform
677 the change if possible.
679 # Do not change the broken/obsolete state automatically.
680 if self
.state
in ("broken", "obsolete"):
683 if self
.repo
and self
.repo
.type == "stable":
684 self
.update_state("stable")
687 # If any of the build jobs are finished, the build will be put in testing
689 for job
in self
.jobs
:
690 if job
.state
== "finished":
691 self
.update_state("testing")
694 def update_state(self
, state
, user
=None, remove
=False):
695 assert state
in ("stable", "testing", "obsolete", "broken")
697 self
.db
.execute("UPDATE builds SET state = %s WHERE id = %s", state
, self
.id)
700 self
._data
["state"] = state
702 # In broken state, the removal from the repository is forced and
703 # all jobs that are not finished yet will be aborted.
704 if state
== "broken":
707 for job
in self
.jobs
:
708 if job
.state
in ("new", "pending", "running", "dependency_error"):
709 job
.state
= "aborted"
711 # If this build is in a repository, it will leave it.
712 if remove
and self
.repo
:
713 self
.repo
.rem_build(self
)
715 # If a release build is now in testing state, we put it into the
716 # first repository of the distribution.
717 elif self
.type == "release" and state
== "testing":
718 # If the build is not in a repository, yet and if there is
719 # a first repository, we put the build there.
720 if not self
.repo
and self
.distro
.first_repo
:
721 self
.distro
.first_repo
.add_build(self
, user
=user
)
725 return self
.data
.state
728 return self
.state
== "broken"
730 def obsolete_others(self
):
731 if not self
.type == "release":
734 for build
in self
.pakfire
.builds
.get_by_name(self
.pkg
.name
, type="release"):
735 # Don't modify ourself.
736 if self
.id == build
.id:
739 # Don't touch broken builds.
740 if build
.state
in ("obsolete", "broken"):
743 # Obsolete the build.
744 build
.update_state("obsolete")
746 def set_severity(self
, severity
):
747 self
.db
.execute("UPDATE builds SET severity = %s WHERE id = %s", state
, self
.id)
750 self
._data
["severity"] = severity
752 def get_severity(self
):
753 return self
.data
.severity
755 severity
= property(get_severity
, set_severity
)
759 if self
.pkg
and self
.pkg
.commit
:
760 return self
.pkg
.commit
762 def update_message(self
, msg
):
763 self
.db
.execute("UPDATE builds SET message = %s WHERE id = %s", msg
, self
.id)
766 self
._data
["message"] = msg
768 def has_perm(self
, user
):
770 Check, if the given user has the right to perform administrative
771 operations on this build.
779 # Check if the user is allowed to manage packages from the critical path.
780 if self
.critical_path
and not user
.has_perm("manage_critical_path"):
783 # Search for maintainers...
786 if self
.type == "scratch":
787 # The owner of a scratch build has the right to do anything with it.
788 if self
.owner_id
== user
.id:
792 elif self
.type == "release":
793 # The maintainer also is allowed to manage the build.
794 if self
.pkg
.maintainer
== user
:
797 # Deny permission for all other cases.
804 if self
.data
.message
:
805 message
= self
.data
.message
808 if self
.commit
.message
:
809 message
= "\n".join((self
.commit
.subject
, self
.commit
.message
))
811 message
= self
.commit
.subject
813 prefix
= "%s: " % self
.pkg
.name
814 if message
.startswith(prefix
):
815 message
= message
[len(prefix
):]
819 def get_priority(self
):
820 return self
.data
.priority
822 def set_priority(self
, priority
):
823 assert priority
in (-2, -1, 0, 1, 2)
825 self
.db
.execute("UPDATE builds SET priority = %s WHERE id = %s", priority
,
829 self
._data
["priority"] = priority
831 priority
= property(get_priority
, set_priority
)
836 if self
.type == "scratch":
837 path
.append(BUILD_SCRATCH_DIR
)
838 path
.append(self
.uuid
)
840 elif self
.type == "release":
841 path
.append(BUILD_RELEASE_DIR
)
842 path
.append("%s/%s-%s-%s" % \
843 (self
.pkg
.name
, self
.pkg
.epoch
, self
.pkg
.version
, self
.pkg
.release
))
846 raise Exception, "Unknown build type: %s" % self
.type
848 return os
.path
.join(*path
)
851 def source_filename(self
):
852 return os
.path
.basename(self
.pkg
.path
)
855 def download_prefix(self
):
856 return "/".join((self
.pakfire
.settings
.get("download_baseurl"), "packages"))
859 def source_download(self
):
860 return "/".join((self
.download_prefix
, self
.pkg
.path
))
863 def source_hash_sha512(self
):
864 return self
.pkg
.hash_sha512
868 # XXX maybe this should rather live in a uimodule.
869 # zlib-1.2.3-2.ip3 [src, i686, blah...]
870 s
= """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
871 (self
.state
, self
.type, self
.uuid
, self
.name
)
874 for job
in self
.jobs
:
875 s_jobs
.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
876 (job
.state
, job
.type, job
.uuid
, job
.arch
))
879 s
+= " [%s]" % ", ".join(s_jobs
)
884 def supported_arches(self
):
885 return self
.pkg
.supported_arches
888 def critical_path(self
):
889 return self
.pkg
.critical_path
891 def get_jobs(self
, type=None):
893 Returns a list of jobs of this build.
895 return self
.pakfire
.jobs
.get_by_build(self
.id, self
, type=type)
900 Get a list of all build jobs that are in this build.
902 if self
._jobs
is None:
903 self
._jobs
= self
.get_jobs(type="build")
909 if self
._jobs
_test
is None:
910 self
._jobs
_test
= self
.get_jobs(type="test")
912 return self
._jobs
_test
915 def all_jobs_finished(self
):
918 for job
in self
.jobs
:
919 if not job
.state
== "finished":
925 def create_autojobs(self
, arches
=None, type="build"):
928 # Arches may be passed to this function. If not we use all arches
929 # this package supports.
931 arches
= self
.supported_arches
933 # Create a new job for every given archirecture.
934 for arch
in self
.pakfire
.arches
.expand(arches
):
935 # Don't create jobs for src
939 job
= self
.add_job(arch
, type=type)
942 # Return all newly created jobs.
945 def add_job(self
, arch
, type="build"):
946 job
= Job
.create(self
.pakfire
, self
, arch
, type=type)
948 # Add new job to cache.
950 self
._jobs
.append(job
)
958 if not self
.type == "release":
961 # Generate an update ID if none does exist, yet.
962 self
.generate_update_id()
965 "%s" % self
.distro
.name
.replace(" ", "").upper(),
966 "%04d" % (self
.data
.update_year
or 0),
967 "%04d" % (self
.data
.update_num
or 0),
972 def generate_update_id(self
):
973 if not self
.type == "release":
976 if self
.data
.update_num
:
979 update
= self
.db
.get("SELECT update_num AS num FROM builds \
980 WHERE update_year = EXTRACT(year FROM NOW()) ORDER BY update_num DESC LIMIT 1")
983 update_num
= update
.num
+ 1
987 self
.db
.execute("UPDATE builds SET update_year = EXTRACT(year FROM NOW()), update_num = %s \
988 WHERE id = %s", update_num
, self
.id)
992 def get_comments(self
, limit
=10, offset
=0):
993 query
= "SELECT * FROM builds_comments \
994 JOIN users ON builds_comments.user_id = users.id \
995 WHERE build_id = %s ORDER BY time_created ASC"
998 for comment
in self
.db
.query(query
, self
.id):
999 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
1000 comments
.append(comment
)
1004 def add_comment(self
, user
, text
, credit
):
1005 # Add the new comment to the database.
1006 id = self
.db
.execute("INSERT INTO \
1007 builds_comments(build_id, user_id, text, credit, time_created) \
1008 VALUES(%s, %s, %s, %s, NOW())",
1009 self
.id, user
.id, text
, credit
)
1011 # Update the credit cache.
1012 if not self
._credits
is None:
1013 self
._credits
+= credit
1015 # Send the new comment to all watchers and stuff.
1016 self
.send_comment_message(id)
1018 # Return the ID of the newly created comment.
1024 if self
._credits
is None:
1025 # Get the sum of the credits from the database.
1026 query
= self
.db
.get(
1027 "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s",
1031 self
._credits
= query
.credits
or 0
1033 return self
._credits
1040 def get_commenters(self
):
1041 users
= self
.db
.query("SELECT DISTINCT users.id AS id FROM builds_comments \
1042 JOIN users ON builds_comments.user_id = users.id \
1043 WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \
1044 AND NOT users.activated = 'Y' ORDER BY users.id", self
.id)
1046 return [users
.User(self
.pakfire
, u
.id) for u
in users
]
1048 def send_comment_message(self
, comment_id
):
1049 comment
= self
.db
.get("SELECT * FROM builds_comments WHERE id = %s",
1053 assert comment
.build_id
== self
.id
1055 # Get user who wrote the comment.
1056 user
= self
.pakfire
.users
.get_by_id(comment
.user_id
)
1059 "build_name" : self
.name
,
1060 "user_name" : user
.realname
,
1063 # XXX create beautiful message
1065 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
1066 N_("%(user_name)s commented on %(build_name)s"),
1067 comment
.text
, format
)
1071 def get_log(self
, comments
=True, repo
=True, limit
=None):
1075 created_entry
= logs
.CreatedLogEntry(self
.pakfire
, self
)
1076 entries
.append(created_entry
)
1079 entries
+= self
.get_comments(limit
=limit
)
1082 entries
+= self
.get_repo_moves(limit
=limit
)
1084 # Sort all entries in chronological order.
1088 entries
= entries
[:limit
]
1094 def get_watchers(self
):
1095 query
= self
.db
.query("SELECT DISTINCT users.id AS id FROM builds_watchers \
1096 JOIN users ON builds_watchers.user_id = users.id \
1097 WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \
1098 AND users.activated = 'Y' ORDER BY users.id", self
.id)
1100 return [users
.User(self
.pakfire
, u
.id) for u
in query
]
1102 def add_watcher(self
, user
):
1103 # Don't add a user twice.
1104 if user
in self
.get_watchers():
1107 self
.db
.execute("INSERT INTO builds_watchers(build_id, user_id) \
1108 VALUES(%s, %s)", self
.id, user
.id)
1111 def message_recipients(self
):
1114 for watcher
in self
.get_watchers():
1115 ret
.append("%s <%s>" % (watcher
.realname
, watcher
.email
))
1121 if self
._update
is None:
1122 update
= self
.db
.get("SELECT update_id AS id FROM updates_builds \
1123 WHERE build_id = %s", self
.id)
1126 self
._update
= updates
.Update(self
.pakfire
, update
.id)
1132 if self
._repo
is None:
1133 repo
= self
.db
.get("SELECT repo_id AS id FROM repositories_builds \
1134 WHERE build_id = %s", self
.id)
1137 self
._repo
= repository
.Repository(self
.pakfire
, repo
.id)
1141 def get_repo_moves(self
, limit
=None):
1142 query
= "SELECT * FROM repositories_history \
1143 WHERE build_id = %s ORDER BY time ASC"
1146 for action
in self
.db
.query(query
, self
.id):
1147 action
= logs
.RepositoryLogEntry(self
.pakfire
, action
)
1148 actions
.append(action
)
1160 def repo_time(self
):
1161 repo
= self
.db
.get("SELECT time_added FROM repositories_builds \
1162 WHERE build_id = %s", self
.id)
1165 return repo
.time_added
1167 def get_auto_move(self
):
1168 return self
.data
.auto_move
== "Y"
1170 def set_auto_move(self
, state
):
1176 self
.db
.execute("UPDATE builds SET auto_move = %s WHERE id = %s", self
.id)
1178 self
._data
["auto_move"] = state
1180 auto_move
= property(get_auto_move
, set_auto_move
)
1183 def can_move_forward(self
):
1187 # If there is no next repository, we cannot move anything.
1188 if not self
.repo
.next
:
1191 # If the needed amount of score is reached, we can move forward.
1192 if self
.score
>= self
.repo
.next
.score_needed
:
1195 # If the repository does not require a minimal time,
1196 # we can move forward immediately.
1197 if not self
.repo
.time_min
:
1200 query
= self
.db
.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
1201 WHERE build_id = %s", self
.id)
1202 duration
= query
.duration
1204 if duration
>= self
.repo
.time_min
:
1211 def get_bug_ids(self
):
1212 query
= self
.db
.query("SELECT bug_id FROM builds_bugs \
1213 WHERE build_id = %s", self
.id)
1215 return [b
.bug_id
for b
in query
]
1217 def add_bug(self
, bug_id
, user
=None, log
=True):
1218 # Check if this bug is already in the list of bugs.
1219 if bug_id
in self
.get_bug_ids():
1222 self
.db
.execute("INSERT INTO builds_bugs(build_id, bug_id) \
1223 VALUES(%s, %s)", self
.id, bug_id
)
1227 self
.log("bug_added", user
=user
, bug_id
=bug_id
)
1229 def rem_bug(self
, bug_id
, user
=None, log
=True):
1230 self
.db
.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \
1231 bug_id = %s", self
.id, bug_id
)
1235 self
.log("bug_removed", user
=user
, bug_id
=bug_id
)
1237 def search_for_bugs(self
):
1241 pattern
= re
.compile(r
"(bug\s?|#)(\d+)")
1243 for txt
in (self
.commit
.subject
, self
.commit
.message
):
1244 for bug
in re
.finditer(pattern
, txt
):
1246 bugid
= int(bug
.group(2))
1250 # Check if a bug with the given ID exists in BZ.
1251 bug
= self
.pakfire
.bugzilla
.get_bug(bugid
)
1259 for bug_id
in self
.get_bug_ids():
1260 bug
= self
.pakfire
.bugzilla
.get_bug(bug_id
)
1268 def _update_bugs_helper(self
, repo
):
1270 This function takes a new status and generates messages that
1271 are appended to all bugs.
1274 kwargs
= BUG_MESSAGES
[repo
.type].copy()
1278 baseurl
= self
.pakfire
.settings
.get("baseurl", "")
1280 "build_url" : "%s/build/%s" % (baseurl
, self
.uuid
),
1281 "distro_name" : self
.distro
.name
,
1282 "package_name" : self
.name
,
1283 "repo_name" : repo
.name
,
1285 kwargs
["comment"] = kwargs
["comment"] % args
1287 self
.update_bugs(**kwargs
)
	def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
		# Queue a single bug update (status/resolution/comment) in the
		# builds_bugs_updates table for later processing.
		self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
			VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment)
1293 def update_bugs(self
, status
, resolution
=None, comment
=None):
1294 # Update all bugs linked to this build.
1295 for bug_id
in self
.get_bug_ids():
1296 self
._update
_bug
(bug_id
, status
=status
, resolution
=resolution
, comment
=comment
)
class Jobs(base.Object):
	"""
	Collection object that looks Job objects up in the database.
	"""

	def _get_job(self, query, *args):
		"""
		Run *query* and wrap the single resulting row in a Job object.

		Returns None when the query matched nothing.

		NOTE(review): lines were lost in extraction; a guard on the
		result is restored, mirroring Builds._get_build.
		"""
		res = self.db.get(query, *args)

		if res:
			return Job(self.backend, res.id, data=res)
1306 def _get_jobs(self
, query
, *args
):
1307 res
= self
.db
.query(query
, *args
)
1310 yield Job(self
.backend
, row
.id, data
=row
)
1312 def create(self
, build
, arch
, type="build"):
1313 job
= self
._get
_job
("INSERT INTO jobs(uuid, type, build_id, arch, time_created) \
1314 VALUES(%s, %s, %s, %s, NOW()) RETURNING *", "%s" % uuid
.uuid4(), type, build
.id, arch
)
1317 # Set cache for Build object.
1320 # Jobs are by default in state "new" and wait for being checked
1321 # for dependencies. Packages that do have no build dependencies
1322 # can directly be forwarded to "pending" state.
1323 if not job
.pkg
.requires
:
1324 job
.state
= "pending"
1328 def get_by_id(self
, id, data
=None):
1329 return Job(self
.pakfire
, id, data
)
1331 def get_by_uuid(self
, uuid
):
1332 job
= self
.db
.get("SELECT id FROM jobs WHERE uuid = %s", uuid
)
1335 return self
.get_by_id(job
.id)
1337 def get_by_build(self
, build_id
, build
=None, type=None):
1339 Get all jobs in the specifies build.
1341 query
= "SELECT * FROM jobs WHERE build_id = %s"
1345 query
+= " AND type = %s"
1348 # Get IDs of all builds in this group.
1350 for job
in self
.db
.query(query
, *args
):
1351 job
= Job(self
.pakfire
, job
.id, job
)
1353 # If the Build object was set, we set it so it won't be retrieved
1354 # from the database again.
1360 # Return sorted list of jobs.
1363 def get_active(self
, host_id
=None, builder
=None, states
=None):
1365 host_id
= builder
.id
1368 states
= ["dispatching", "running", "uploading"]
1370 query
= "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states
))
1374 query
+= " AND builder_id = %s" % host_id
1376 query
+= " ORDER BY \
1378 WHEN jobs.state = 'running' THEN 0 \
1379 WHEN jobs.state = 'uploading' THEN 1 \
1380 WHEN jobs.state = 'dispatching' THEN 2 \
1381 WHEN jobs.state = 'pending' THEN 3 \
1382 WHEN jobs.state = 'new' THEN 4 \
1383 END, time_started ASC"
1385 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
1387 def get_latest(self
, arch
=None, builder
=None, limit
=None, age
=None, date
=None):
1388 query
= "SELECT * FROM jobs"
1391 where
= ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]
1394 where
.append("arch = %s")
1398 where
.append("builder_id = %s")
1399 args
.append(builder
.id)
1403 year
, month
, day
= date
.split("-", 2)
1404 date
= datetime
.date(int(year
), int(month
), int(day
))
1408 where
.append("(time_created::date = %s OR \
1409 time_started::date = %s OR time_finished::date = %s)")
1410 args
+= (date
, date
, date
)
1413 where
.append("time_finished >= NOW() - '%s'::interval" % age
)
1416 query
+= " WHERE %s" % " AND ".join(where
)
1418 query
+= " ORDER BY time_finished DESC"
1421 query
+= " LIMIT %s"
1424 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
1426 def get_average_build_time(self
):
1428 Returns the average build time of all finished builds from the
1431 result
= self
.db
.get("SELECT AVG(time_finished - time_started) as average \
1432 FROM jobs WHERE type = 'build' AND state = 'finished' AND \
1433 time_finished >= NOW() - '3 months'::interval")
1436 return result
.average
1438 def count(self
, *states
):
1439 query
= "SELECT COUNT(*) AS count FROM jobs"
1443 query
+= " WHERE state IN %s"
1446 jobs
= self
.db
.get(query
, *args
)
1450 def restart_failed(self
, max_tries
=9):
1451 jobs
= self
._get
_jobs
("SELECT jobs.* FROM jobs \
1452 JOIN builds ON builds.id = jobs.build_id \
1454 jobs.type = 'build' AND \
1455 jobs.state = 'failed' AND \
1456 jobs.tries <= %s AND \
1457 NOT builds.state = 'broken' AND \
1458 jobs.time_finished < NOW() - '72 hours'::interval \
1461 WHEN jobs.type = 'build' THEN 0 \
1462 WHEN jobs.type = 'test' THEN 1 \
1464 builds.priority DESC, jobs.time_created ASC",
1469 job
.set_state("new", log
=False)
1472 class Job(base
.DataObject
):
1476 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.name
)
1478 def __eq__(self
, other
):
1479 if isinstance(other
, self
.__class
__):
1480 return self
.id == other
.id
1482 def __lt__(self
, other
):
1483 if isinstance(other
, self
.__class
__):
1484 if (self
.type, other
.type) == ("build", "test"):
1487 if self
.build
== other
.build
:
1488 return arches
.priority(self
.arch
) < arches
.priority(other
.arch
)
1490 return self
.time_created
< other
.time_created
1493 packages
= self
.backend
.packages
._get
_packages
("SELECT packages.* FROM jobs_packages \
1494 LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
1495 WHERE jobs_packages.job_id = %s ORDER BY packages.name", self
.id)
1497 return iter(packages
)
1499 def __nonzero__(self
):
1503 res
= self
.db
.get("SELECT COUNT(*) AS len FROM jobs_packages \
1504 WHERE job_id = %s", self
.id)
1510 return self
.build
.distro
1513 self
.__delete
_buildroots
()
1514 self
.__delete
_history
()
1515 self
.__delete
_packages
()
1516 self
.__delete
_logfiles
()
1518 # Delete the job itself.
1519 self
.db
.execute("DELETE FROM jobs WHERE id = %s", self
.id)
	def __delete_buildroots(self):
		"""
		Removes all buildroots.
		"""
		self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)
	def __delete_history(self):
		"""
		Removes all references in the history to this build job.
		"""
		self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id)
1533 def __delete_packages(self
):
1535 Deletes all uploaded files from the job.
1537 for pkg
in self
.packages
:
1540 self
.db
.execute("DELETE FROM jobs_packages WHERE job_id = %s", self
.id)
	def __delete_logfiles(self):
		# Queue every logfile of this job for deletion from disk.
		for logfile in self.logfiles:
			self.db.execute("INSERT INTO queue_delete(path) VALUES(%s)", logfile.path)
1546 def reset(self
, user
=None):
1547 self
.__delete
_buildroots
()
1548 self
.__delete
_packages
()
1549 self
.__delete
_history
()
1550 self
.__delete
_logfiles
()
1553 self
.log("reset", user
=user
)
1557 def log(self
, action
, user
=None, state
=None, builder
=None, test_job
=None):
1564 builder_id
= builder
.id
1568 test_job_id
= test_job
.id
1570 self
.db
.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
1571 time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
1572 self
.id, action
, state
, user_id
, builder_id
, test_job_id
)
1574 def get_log(self
, limit
=None, offset
=None, user
=None):
1575 query
= "SELECT * FROM jobs_history"
1577 conditions
= ["job_id = %s",]
1581 conditions
.append("user_id = %s")
1582 args
.append(user
.id)
1585 query
+= " WHERE %s" % " AND ".join(conditions
)
1587 query
+= " ORDER BY time DESC"
1591 query
+= " LIMIT %s,%s"
1592 args
+= [offset
, limit
,]
1594 query
+= " LIMIT %s"
1598 for entry
in self
.db
.query(query
, *args
):
1599 entry
= logs
.JobLogEntry(self
.pakfire
, entry
)
1600 entries
.append(entry
)
1606 return self
.data
.uuid
1610 return self
.data
.type
1614 return self
.data
.build_id
1618 return self
.pakfire
.builds
.get_by_id(self
.build_id
)
1621 def related_jobs(self
):
1624 for job
in self
.build
.jobs
:
1634 return self
.build
.pkg
1638 return "%s-%s.%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
, self
.arch
)
1642 return sum((p
.size
for p
in self
.packages
))
1647 Returns the rank in the build queue
1649 if not self
.state
== "pending":
1652 res
= self
.db
.get("SELECT rank FROM jobs_queue WHERE job_id = %s", self
.id)
    def is_running(self):
        """
            Returns True if job is in a running state.
        """
        # "Running" covers the whole active life cycle from queueing
        # until the upload of results has completed.
        return self.state in ("pending", "dispatching", "running", "uploading")
    def get_state(self):
        # Raw state string as stored in the jobs table.
        return self.data.state
    def set_state(self, state, user=None, log=True):
        # Nothing to do if the state remains.
        if not self.state == state:
            self.db.execute("UPDATE jobs SET state = %s WHERE id = %s", state, self.id)

            # Log the state change ("new" is the initial state and is
            # not recorded).
            # NOTE(review): the nesting of the log/cache statements
            # inside this branch is reconstructed — confirm upstream.
            if log and not state == "new":
                self.log("state_change", state=state, user=user)

            # Keep the cached row in sync with the database.
            self._data["state"] = state

        # Always clear the message when the status is changed.
        self.update_message(None)

        # Update some more informations.
        if state == "dispatching":
            # Job was handed to a builder: start the clock.
            self.db.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
                WHERE id = %s", self.id)

        elif state == "pending":
            # Re-queued: count another try and reset the timestamps.
            self.db.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
                time_finished = NULL WHERE id = %s", self.id)

        elif state in ("aborted", "dependency_error", "finished", "failed"):
            # Set finish time and reset builder..
            self.db.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self.id)

            # Send messages to the user.
            if state == "finished":
                self.send_finished_message()

            elif state == "failed":
                # Remove all package files if a job is set to failed state.
                self.__delete_packages()

                self.send_failed_message()

        # Automatically update the state of the build (not on test builds).
        if self.type == "build":
            self.build.auto_update_state()

    # state reads via get_state and writes via set_state.
    state = property(get_state, set_state)
1714 return self
.data
.message
1716 def update_message(self
, msg
):
1717 self
.db
.execute("UPDATE jobs SET message = %s WHERE id = %s",
1721 self
._data
["message"] = msg
    def get_builder(self):
        # Returns the Builder this job is/was assigned to, or None if
        # no builder has been assigned yet (builder_id is NULL).
        if self.data.builder_id:
            return self.backend.builders.get_by_id(self.data.builder_id)
    def set_builder(self, builder, user=None):
        # Assigns this job to the given builder and records the event.
        self.db.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
            builder.id, self.id)

        # Keep the cached row and the lazy_property cache in sync.
        self._data["builder_id"] = builder.id

        self._builder = builder

        # Log the assignment (attributed to user, if given).
        self.log("builder_assigned", builder=builder, user=user)

    # builder is lazily resolved via get_builder and assignable via
    # set_builder.
    builder = lazy_property(get_builder, set_builder)
1745 return self
.data
.arch
1749 if not self
.time_started
:
1752 if self
.time_finished
:
1753 delta
= self
.time_finished
- self
.time_started
1755 delta
= datetime
.datetime
.utcnow() - self
.time_started
1757 return delta
.total_seconds()
1760 def time_created(self
):
1761 return self
.data
.time_created
1764 def time_started(self
):
1765 return self
.data
.time_started
1768 def time_finished(self
):
1769 return self
.data
.time_finished
1772 def expected_runtime(self
):
1774 Returns the estimated time and stddev, this job takes to finish.
1776 # Get the average build time.
1777 build_times
= self
.pakfire
.builds
.get_build_times_by_arch(self
.arch
,
1780 # If there is no statistical data, we cannot estimate anything.
1784 return build_times
.average
, build_times
.stddev
1788 expected_runtime
, stddev
= self
.expected_runtime
1790 if expected_runtime
:
1791 return expected_runtime
- int(self
.duration
), stddev
1795 return self
.data
.tries
1797 def get_pkg_by_uuid(self
, uuid
):
1798 pkg
= self
.backend
.packages
._get
_package
("SELECT packages.id FROM packages \
1799 JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
1800 WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
1811 for log
in self
.db
.query("SELECT id FROM logfiles WHERE job_id = %s", self
.id):
1812 log
= logs
.LogFile(self
.pakfire
, log
.id)
1815 logfiles
.append(log
)
1819 def add_file(self
, filename
):
1821 Add the specified file to this job.
1823 The file is copied to the right directory by this function.
1825 assert os
.path
.exists(filename
)
1827 if filename
.endswith(".log"):
1828 self
._add
_file
_log
(filename
)
1830 elif filename
.endswith(".%s" % PACKAGE_EXTENSION
):
1831 # It is not allowed to upload packages on test builds.
1832 if self
.type == "test":
1835 self
._add
_file
_package
(filename
)
1837 def _add_file_log(self
, filename
):
1839 Attach a log file to this job.
1841 target_dirname
= os
.path
.join(self
.build
.path
, "logs")
1843 if self
.type == "test":
1846 target_filename
= os
.path
.join(target_dirname
,
1847 "test.%s.%s.%s.log" % (self
.arch
, i
, self
.tries
))
1849 if os
.path
.exists(target_filename
):
1854 target_filename
= os
.path
.join(target_dirname
,
1855 "build.%s.%s.log" % (self
.arch
, self
.tries
))
1857 # Make sure the target directory exists.
1858 if not os
.path
.exists(target_dirname
):
1859 os
.makedirs(target_dirname
)
1861 # Calculate a SHA512 hash from that file.
1862 f
= open(filename
, "rb")
1863 h
= hashlib
.sha512()
1865 buf
= f
.read(BUFFER_SIZE
)
1872 # Copy the file to the final location.
1873 shutil
.copy2(filename
, target_filename
)
1875 # Create an entry in the database.
1876 self
.db
.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
1877 VALUES(%s, %s, %s, %s)", self
.id, os
.path
.relpath(target_filename
, PACKAGES_DIR
),
1878 os
.path
.getsize(target_filename
), h
.hexdigest())
1880 def _add_file_package(self
, filename
):
1881 # Open package (creates entry in the database).
1882 pkg
= packages
.Package
.open(self
.pakfire
, filename
)
1884 # Move package to the build directory.
1885 pkg
.move(os
.path
.join(self
.build
.path
, self
.arch
))
1887 # Attach the package to this job.
1888 self
.db
.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
    def get_aborted_state(self):
        # The exit/abort code reported by the builder, if any.
        return self.data.aborted_state

    def set_aborted_state(self, state):
        # Persist and cache via the generic attribute setter.
        self._set_attribute("aborted_state", state)

    aborted_state = property(get_aborted_state, set_aborted_state)
1900 def message_recipients(self
):
1903 # Add all people watching the build.
1904 l
+= self
.build
.message_recipients
1906 # Add the package maintainer on release builds.
1907 if self
.build
.type == "release":
1908 maint
= self
.pkg
.maintainer
1910 if isinstance(maint
, users
.User
):
1911 l
.append("%s <%s>" % (maint
.realname
, maint
.email
))
1915 # XXX add committer and commit author.
1917 # Add the owner of the scratch build on scratch builds.
1918 elif self
.build
.type == "scratch" and self
.build
.user
:
1919 l
.append("%s <%s>" % \
1920 (self
.build
.user
.realname
, self
.build
.user
.email
))
1924 def save_buildroot(self
, pkgs
):
1927 for pkg_name
, pkg_uuid
in pkgs
:
1928 rows
.append((self
.id, self
.tries
, pkg_uuid
, pkg_name
))
1930 # Cleanup old stuff first (for rebuilding packages).
1931 self
.db
.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s",
1932 self
.id, self
.tries
)
1934 self
.db
.executemany("INSERT INTO \
1935 jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \
1936 VALUES(%s, %s, %s, %s)", rows
)
1938 def has_buildroot(self
, tries
=None):
1942 res
= self
.db
.get("SELECT COUNT(*) AS num FROM jobs_buildroots \
1943 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s",
1951 def get_buildroot(self
, tries
=None):
1955 rows
= self
.db
.query("SELECT * FROM jobs_buildroots \
1956 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
1957 ORDER BY pkg_name", self
.id, tries
)
1961 # Search for this package in the packages table.
1962 pkg
= self
.pakfire
.packages
.get_by_uuid(row
.pkg_uuid
)
1963 pkgs
.append((row
.pkg_name
, row
.pkg_uuid
, pkg
))
1967 def send_finished_message(self
):
1968 # Send no finished mails for test jobs.
1969 if self
.type == "test":
1972 logging
.debug("Sending finished message for job %s to %s" % \
1973 (self
.name
, ", ".join(self
.message_recipients
)))
1976 "build_name" : self
.name
,
1977 "build_host" : self
.builder
.name
,
1978 "build_uuid" : self
.uuid
,
1981 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
1982 MSG_BUILD_FINISHED_SUBJECT
, MSG_BUILD_FINISHED
, info
)
1984 def send_failed_message(self
):
1985 logging
.debug("Sending failed message for job %s to %s" % \
1986 (self
.name
, ", ".join(self
.message_recipients
)))
1990 build_host
= self
.builder
.name
1993 "build_name" : self
.name
,
1994 "build_host" : build_host
,
1995 "build_uuid" : self
.uuid
,
1998 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
1999 MSG_BUILD_FAILED_SUBJECT
, MSG_BUILD_FAILED
, info
)
2001 def set_start_time(self
, start_time
):
2002 if start_time
is None:
2005 self
.db
.execute("UPDATE jobs SET start_not_before = NOW() + %s \
2006 WHERE id = %s LIMIT 1", start_time
, self
.id)
2008 def schedule(self
, type, start_time
=None, user
=None):
2009 assert type in ("rebuild", "test")
2011 if type == "rebuild":
2012 if self
.state
== "finished":
2015 self
.set_state("new", user
=user
, log
=False)
2016 self
.set_start_time(start_time
)
2019 self
.log("schedule_rebuild", user
=user
)
2021 elif type == "test":
2022 if not self
.state
== "finished":
2025 # Create a new job with same build and arch.
2026 job
= self
.create(self
.pakfire
, self
.build
, self
.arch
, type="test")
2027 job
.set_start_time(start_time
)
2030 self
.log("schedule_test_job", test_job
=job
, user
=user
)
    def schedule_test(self, start_not_before=None, user=None):
        # Convenience wrapper around schedule() for test jobs.
        return self.schedule("test", start_time=start_not_before, user=user)
    def schedule_rebuild(self, start_not_before=None, user=None):
        # Convenience wrapper around schedule() for rebuilds.
        return self.schedule("rebuild", start_time=start_not_before, user=user)
2042 def get_build_repos(self
):
2044 Returns a list of all repositories that should be used when
2047 repo_ids
= self
.db
.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",
2051 return self
.distro
.get_build_repos()
2054 for repo
in self
.distro
.repositories
:
2055 if repo
.id in [r
.id for r
in repo_ids
]:
2058 return repos
or self
.distro
.get_build_repos()
2060 def get_repo_config(self
):
2062 Get repository configuration file that is sent to the builder.
2066 for repo
in self
.get_build_repos():
2067 confs
.append(repo
.get_conf())
2069 return "\n\n".join(confs
)
2071 def get_config(self
):
2073 Get configuration file that is sent to the builder.
2077 # Add the distribution configuration.
2078 confs
.append(self
.distro
.get_config())
2080 # Then add all repositories for this build.
2081 confs
.append(self
.get_repo_config())
2083 return "\n\n".join(confs
)
2085 def resolvdep(self
):
2086 config
= pakfire
.config
.Config(files
=["general.conf"])
2087 config
.parse(self
.get_config())
2089 # The filename of the source file.
2090 filename
= os
.path
.join(PACKAGES_DIR
, self
.build
.pkg
.path
)
2091 assert os
.path
.exists(filename
), filename
2093 # Create a new pakfire instance with the configuration for
2095 p
= pakfire
.PakfireServer(config
=config
, arch
=self
.arch
)
2097 # Try to solve the build dependencies.
2099 solver
= p
.resolvdep(filename
)
2101 # Catch dependency errors and log the problem string.
2102 except DependencyError
, e
:
2103 self
.state
= "dependency_error"
2104 self
.update_message(e
)
2107 # If the build dependencies can be resolved, we set the build in
2109 if solver
.status
is True:
2110 if self
.state
in ("failed",):
2113 self
.state
= "pending"