13 import pakfire
.packages
17 from . import builders
19 from . import packages
20 from . import repository
24 from .constants
import *
25 from .decorators
import *
def import_from_package(_pakfire, filename, distro=None, commit=None, type="release",
		arches=None, check_for_duplicates=True, owner=None):
	"""
		Import a package file into the build service.

		Opens the package at ``filename``, optionally checks the target
		distribution for duplicates, registers the package in the
		database and creates a build (plus its automatic jobs) for it.

		NOTE(review): several lines of this function are not visible in
		this excerpt (e.g. the early-return after the duplicate warning
		and the commit-association code) — verify against the full file.
	"""
	# The distribution is taken from the commit that is being processed.
	distro = commit.source.distro

	# Open the package file to read some basic information.
	pkg = pakfire.packages.open(None, None, filename)

	if check_for_duplicates:
		if distro.has_package(pkg.name, pkg.epoch, pkg.version, pkg.release):
			logging.warning("Duplicate package detected: %s. Skipping." % pkg)

	# Open the package and add it to the database.
	pkg = packages.Package.open(_pakfire, filename)
	logging.debug("Created new package: %s" % pkg)

	# Associate the package to the processed commit.
	# (code not visible in this excerpt)

	# Create a new build object from the package which
	# is always a release build.
	build = Build.create(_pakfire, pkg, type=type, owner=owner, distro=distro)
	logging.debug("Created new build job: %s" % build)

	# Create all automatic jobs.
	build.create_autojobs(arches=arches)
62 class Builds(base
.Object
):
	def _get_build(self, query, *args):
		"""
			Execute *query* and wrap the single result row in a Build.

			NOTE(review): the guard between the lookup and the return
			(presumably ``if res:``) is not visible in this excerpt.
		"""
		res = self.db.get(query, *args)

		return Build(self.backend, res.id, data=res)
	def _get_builds(self, query, *args):
		"""
			Execute *query* and yield a Build for every result row.

			NOTE(review): the loop header that binds ``row`` (presumably
			``for row in res:``) is not visible in this excerpt.
		"""
		res = self.db.query(query, *args)

		yield Build(self.backend, row.id, data=row)
75 def get_by_id(self
, id, data
=None):
76 return Build(self
.pakfire
, id, data
=data
)
	def get_by_uuid(self, uuid):
		"""
			Look up a build by its UUID and return a Build object.

			NOTE(review): the not-found handling between the query and
			the return is not visible in this excerpt.
		"""
		build = self.db.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid)

		return self.get_by_id(build.id)
84 def get_all(self
, limit
=50):
85 query
= "SELECT * FROM builds ORDER BY time_created DESC"
88 query
+= " LIMIT %d" % limit
90 return [self
.get_by_id(b
.id, b
) for b
in self
.db
.query(query
)]
92 def get_by_user(self
, user
, type=None, public
=None):
96 if not type or type == "scratch":
97 # On scratch builds the user id equals the owner id.
98 conditions
.append("(builds.type = 'scratch' AND owner_id = %s)")
101 elif not type or type == "release":
105 conditions
.append("public = 'Y'")
106 elif public
is False:
107 conditions
.append("public = 'N'")
109 query
= "SELECT builds.* AS id FROM builds \
110 JOIN packages ON builds.pkg_id = packages.id"
113 query
+= " WHERE %s" % " AND ".join(conditions
)
115 query
+= " ORDER BY builds.time_created DESC"
118 for build
in self
.db
.query(query
, *args
):
119 build
= Build(self
.pakfire
, build
.id, build
)
124 def get_by_name(self
, name
, type=None, public
=None, user
=None, limit
=None, offset
=None):
127 "packages.name = %s",
131 conditions
.append("builds.type = %s")
136 or_conditions
.append("public = 'Y'")
137 elif public
is False:
138 or_conditions
.append("public = 'N'")
140 if user
and not user
.is_admin():
141 or_conditions
.append("builds.owner_id = %s")
144 query
= "SELECT builds.* AS id FROM builds \
145 JOIN packages ON builds.pkg_id = packages.id"
148 conditions
.append(" OR ".join(or_conditions
))
151 query
+= " WHERE %s" % " AND ".join(conditions
)
153 if type == "release":
154 query
+= " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"
155 elif type == "scratch":
156 query
+= " ORDER BY time_created DESC"
160 query
+= " LIMIT %s,%s"
161 args
.extend([offset
, limit
])
166 return [Build(self
.pakfire
, b
.id, b
) for b
in self
.db
.query(query
, *args
)]
168 def get_latest_by_name(self
, name
, type=None, public
=None):
170 SELECT * FROM builds \
171 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
172 WHERE builds_latest.package_name = %s"
176 query
+= " AND builds_latest.build_type = %s"
180 query
+= " AND builds.public = %s"
182 elif public
is False:
183 query
+= " AND builds.public = %s"
186 # Get the last one only.
187 # Prefer release builds over scratch builds.
190 CASE builds.type WHEN 'release' THEN 0 ELSE 1 END, \
191 builds.time_created DESC \
194 res
= self
.db
.get(query
, *args
)
197 return Build(self
.pakfire
, res
.id, res
)
199 def get_active_builds(self
, name
, public
=None):
201 SELECT * FROM builds \
202 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
203 WHERE builds_latest.package_name = %s AND builds.type = %s"
204 args
= [name
, "release"]
207 query
+= " AND builds.public = %s"
209 elif public
is False:
210 query
+= " AND builds.public = %s"
214 for row
in self
.db
.query(query
, *args
):
215 b
= Build(self
.pakfire
, row
.id, row
)
218 # Sort the result. Lastest build first.
219 builds
.sort(reverse
=True)
224 builds
= self
.db
.get("SELECT COUNT(*) AS count FROM builds")
228 def needs_test(self
, threshold
, arch
, limit
=None, randomize
=False):
229 query
= "SELECT id FROM builds \
231 (SELECT * FROM jobs WHERE \
232 jobs.build_id = builds.id AND \
234 (jobs.state != 'finished' OR \
235 jobs.time_finished >= %s) \
238 (SELECT * FROM jobs WHERE \
239 jobs.build_id = builds.id AND \
241 jobs.type = 'build' AND \
242 jobs.state = 'finished' AND \
243 jobs.time_finished < %s \
245 AND builds.type = 'release' \
246 AND (builds.state = 'stable' OR builds.state = 'testing')"
247 args
= [arch
, threshold
, arch
, threshold
]
250 query
+= " ORDER BY RAND()"
256 return [Build(self
.pakfire
, b
.id) for b
in self
.db
.query(query
, *args
)]
258 def get_obsolete(self
, repo
=None):
260 Get all obsoleted builds.
262 If repo is True: which are in any repository.
263 If repo is some Repository object: which are in this repository.
268 query
= "SELECT id FROM builds WHERE state = 'obsolete'"
271 query
= "SELECT build_id AS id FROM repositories_builds \
272 JOIN builds ON builds.id = repositories_builds.build_id \
273 WHERE builds.state = 'obsolete'"
275 if repo
and not repo
is True:
276 query
+= " AND repositories_builds.repo_id = %s"
279 res
= self
.db
.query(query
, *args
)
283 build
= Build(self
.pakfire
, build
.id)
288 def get_changelog(self
, name
, public
=None, limit
=5, offset
=0):
289 query
= "SELECT builds.* FROM builds \
290 JOIN packages ON builds.pkg_id = packages.id \
295 args
= ["release", name
,]
298 query
+= " AND builds.public = %s"
300 elif public
== False:
301 query
+= " AND builds.public = %s"
304 query
+= " ORDER BY builds.time_created DESC"
308 query
+= " LIMIT %s,%s"
309 args
+= [offset
, limit
]
315 for b
in self
.db
.query(query
, *args
):
316 b
= Build(self
.pakfire
, b
.id, b
)
319 builds
.sort(reverse
=True)
323 def get_comments(self
, limit
=10, offset
=None, user
=None):
324 query
= "SELECT * FROM builds_comments \
325 JOIN users ON builds_comments.user_id = users.id"
330 wheres
.append("users.id = %s")
334 query
+= " WHERE %s" % " AND ".join(wheres
)
337 query
+= " ORDER BY time_created DESC"
342 query
+= " LIMIT %s,%s"
350 for comment
in self
.db
.query(query
, *args
):
351 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
352 comments
.append(comment
)
356 def get_build_times_summary(self
, name
=None, job_type
=None, arch
=None):
359 builds_times.arch AS arch, \
360 MAX(duration) AS maximum, \
361 MIN(duration) AS minimum, \
362 AVG(duration) AS average, \
363 SUM(duration) AS sum, \
364 STDDEV_POP(duration) AS stddev \
366 LEFT JOIN builds ON builds_times.build_id = builds.id \
367 LEFT JOIN packages ON builds.pkg_id = packages.id"
374 conditions
.append("packages.name = %s")
377 # Filter by job types.
379 conditions
.append("builds_times.job_type = %s")
380 args
.append(job_type
)
384 conditions
.append("builds_times.arch = %s")
389 query
+= " WHERE %s" % " AND ".join(conditions
)
391 # Grouping and sorting.
392 query
+= " GROUP BY arch ORDER BY arch DESC"
394 return self
.db
.query(query
, *args
)
396 def get_build_times_by_arch(self
, arch
, **kwargs
):
401 build_times
= self
.get_build_times_summary(**kwargs
)
403 return build_times
[0]
406 class Build(base
.Object
):
407 def __init__(self
, pakfire
, id, data
=None):
408 base
.Object
.__init
__(self
, pakfire
)
416 self
._jobs
_test
= None
417 self
._depends
_on
= None
426 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.pkg
)
428 def __cmp__(self
, other
):
432 return cmp(self
.pkg
, other
.pkg
)
435 jobs
= self
.backend
.jobs
._get
_jobs
("SELECT * FROM jobs \
436 WHERE build_id = %s", self
.id)
438 return iter(sorted(jobs
))
441 def create(cls
, pakfire
, pkg
, type="release", owner
=None, distro
=None, public
=True):
442 assert type in ("release", "scratch", "test")
443 assert distro
, "You need to specify the distribution of this build."
450 # Check if scratch build has an owner.
451 if type == "scratch" and not owner
:
452 raise Exception, "Scratch builds require an owner"
454 # Set the default priority of this build.
455 if type == "release":
458 elif type == "scratch":
464 id = pakfire
.db
.execute("""
465 INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
466 VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid
.uuid4(), pkg
.id,
467 type, distro
.id, public
, priority
)
469 # Set the owner of this buildgroup.
471 pakfire
.db
.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
474 build
= cls(pakfire
, id)
476 # Log that the build has been created.
477 build
.log("created", user
=owner
)
479 # Create directory where the files live.
480 if not os
.path
.exists(build
.path
):
481 os
.makedirs(build
.path
)
483 # Move package file to the directory of the build.
484 source_path
= os
.path
.join(build
.path
, "src")
485 build
.pkg
.move(source_path
)
487 # Generate an update id.
488 build
.generate_update_id()
490 # Obsolete all other builds with the same name to track updates.
491 build
.obsolete_others()
493 # Search for possible bug IDs in the commit message.
494 build
.search_for_bugs()
500 Deletes this build including all jobs, packages and the source
503 # If the build is in a repository, we need to remove it.
505 self
.repo
.rem_build(self
)
507 for job
in self
.jobs
+ self
.test_jobs
:
513 # Delete everything related to this build.
515 self
.__delete
_comments
()
516 self
.__delete
_history
()
517 self
.__delete
_watchers
()
519 # Delete the build itself.
520 self
.db
.execute("DELETE FROM builds WHERE id = %s", self
.id)
522 def __delete_bugs(self
):
524 Delete all associated bugs.
526 self
.db
.execute("DELETE FROM builds_bugs WHERE build_id = %s", self
.id)
528 def __delete_comments(self
):
532 self
.db
.execute("DELETE FROM builds_comments WHERE build_id = %s", self
.id)
534 def __delete_history(self
):
536 Delete the repository history.
538 self
.db
.execute("DELETE FROM repositories_history WHERE build_id = %s", self
.id)
540 def __delete_watchers(self
):
544 self
.db
.execute("DELETE FROM builds_watchers WHERE build_id = %s", self
.id)
548 Resets the whole build so it can start again (as it has never
551 for job
in self
.jobs
:
554 #self.__delete_bugs()
555 self
.__delete
_comments
()
556 self
.__delete
_history
()
557 self
.__delete
_watchers
()
559 self
.state
= "building"
566 Lazy fetching of data for this object.
568 if self
._data
is None:
569 self
._data
= self
.db
.get("SELECT * FROM builds WHERE id = %s", self
.id)
577 A set of information that is sent to the XMLRPC client.
579 return { "uuid" : self
.uuid
}
581 def log(self
, action
, user
=None, bug_id
=None):
586 self
.db
.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
587 VALUES(%s, %s, %s, NOW(), %s)", self
.id, action
, user_id
, bug_id
)
592 The UUID of this build.
594 return self
.data
.uuid
599 Get package that is to be built in the build.
601 if self
._pkg
is None:
602 self
._pkg
= packages
.Package(self
.pakfire
, self
.data
.pkg_id
)
608 return "%s-%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
)
613 The type of this build.
615 return self
.data
.type
620 The ID of the owner of this build.
622 return self
.data
.owner_id
627 The owner of this build.
629 if not self
.owner_id
:
632 if self
._owner
is None:
633 self
._owner
= self
.pakfire
.users
.get_by_id(self
.owner_id
)
640 return self
.data
.distro_id
644 if self
._distro
is None:
645 self
._distro
= self
.pakfire
.distros
.get_by_id(self
.distro_id
)
652 if self
.type == "scratch":
655 def get_depends_on(self
):
656 if self
.data
.depends_on
and self
._depends
_on
is None:
657 self
._depends
_on
= Build(self
.pakfire
, self
.data
.depends_on
)
659 return self
._depends
_on
661 def set_depends_on(self
, build
):
662 self
.db
.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
666 self
._depends
_on
= build
667 self
._data
["depends_on"] = build
.id
669 depends_on
= property(get_depends_on
, set_depends_on
)
673 return self
.data
.time_created
677 return self
.created
.date()
682 Is this build public?
684 return self
.data
.public
== "Y"
689 Returns the size on disk of this build.
693 # Add the source package.
698 s
+= sum((j
.size
for j
in self
.jobs
))
704 # # Cache all states.
705 # states = [j.state for j in self.jobs]
707 # target_state = "unknown"
709 # # If at least one job has failed, the whole build has failed.
710 # if "failed" in states:
711 # target_state = "failed"
713 # # It at least one of the jobs is still running, the whole
714 # # build is in running state.
715 # elif "running" in states:
716 # target_state = "running"
718 # # If all jobs are in the finished state, we turn into finished
720 # elif all([s == "finished" for s in states]):
721 # target_state = "finished"
723 # return target_state
725 def auto_update_state(self
):
727 Check if the state of this build can be updated and perform
728 the change if possible.
730 # Do not change the broken/obsolete state automatically.
731 if self
.state
in ("broken", "obsolete"):
734 if self
.repo
and self
.repo
.type == "stable":
735 self
.update_state("stable")
738 # If any of the build jobs are finished, the build will be put in testing
740 for job
in self
.jobs
:
741 if job
.state
== "finished":
742 self
.update_state("testing")
745 def update_state(self
, state
, user
=None, remove
=False):
746 assert state
in ("stable", "testing", "obsolete", "broken")
748 self
.db
.execute("UPDATE builds SET state = %s WHERE id = %s", state
, self
.id)
751 self
._data
["state"] = state
753 # In broken state, the removal from the repository is forced and
754 # all jobs that are not finished yet will be aborted.
755 if state
== "broken":
758 for job
in self
.jobs
:
759 if job
.state
in ("new", "pending", "running", "dependency_error"):
760 job
.state
= "aborted"
762 # If this build is in a repository, it will leave it.
763 if remove
and self
.repo
:
764 self
.repo
.rem_build(self
)
766 # If a release build is now in testing state, we put it into the
767 # first repository of the distribution.
768 elif self
.type == "release" and state
== "testing":
769 # If the build is not in a repository, yet and if there is
770 # a first repository, we put the build there.
771 if not self
.repo
and self
.distro
.first_repo
:
772 self
.distro
.first_repo
.add_build(self
, user
=user
)
776 return self
.data
.state
779 return self
.state
== "broken"
781 def obsolete_others(self
):
782 if not self
.type == "release":
785 for build
in self
.pakfire
.builds
.get_by_name(self
.pkg
.name
, type="release"):
786 # Don't modify ourself.
787 if self
.id == build
.id:
790 # Don't touch broken builds.
791 if build
.state
in ("obsolete", "broken"):
794 # Obsolete the build.
795 build
.update_state("obsolete")
797 def set_severity(self
, severity
):
798 self
.db
.execute("UPDATE builds SET severity = %s WHERE id = %s", state
, self
.id)
801 self
._data
["severity"] = severity
803 def get_severity(self
):
804 return self
.data
.severity
806 severity
= property(get_severity
, set_severity
)
810 if self
.pkg
and self
.pkg
.commit
:
811 return self
.pkg
.commit
813 def update_message(self
, msg
):
814 self
.db
.execute("UPDATE builds SET message = %s WHERE id = %s", msg
, self
.id)
817 self
._data
["message"] = msg
819 def has_perm(self
, user
):
821 Check, if the given user has the right to perform administrative
822 operations on this build.
830 # Check if the user is allowed to manage packages from the critical path.
831 if self
.critical_path
and not user
.has_perm("manage_critical_path"):
834 # Search for maintainers...
837 if self
.type == "scratch":
838 # The owner of a scratch build has the right to do anything with it.
839 if self
.owner_id
== user
.id:
843 elif self
.type == "release":
844 # The maintainer also is allowed to manage the build.
845 if self
.pkg
.maintainer
== user
:
848 # Deny permission for all other cases.
855 if self
.data
.message
:
856 message
= self
.data
.message
859 if self
.commit
.message
:
860 message
= "\n".join((self
.commit
.subject
, self
.commit
.message
))
862 message
= self
.commit
.subject
864 prefix
= "%s: " % self
.pkg
.name
865 if message
.startswith(prefix
):
866 message
= message
[len(prefix
):]
870 def get_priority(self
):
871 return self
.data
.priority
873 def set_priority(self
, priority
):
874 assert priority
in (-2, -1, 0, 1, 2)
876 self
.db
.execute("UPDATE builds SET priority = %s WHERE id = %s", priority
,
880 self
._data
["priority"] = priority
882 priority
= property(get_priority
, set_priority
)
887 if self
.type == "scratch":
888 path
.append(BUILD_SCRATCH_DIR
)
889 path
.append(self
.uuid
)
891 elif self
.type == "release":
892 path
.append(BUILD_RELEASE_DIR
)
893 path
.append("%s/%s-%s-%s" % \
894 (self
.pkg
.name
, self
.pkg
.epoch
, self
.pkg
.version
, self
.pkg
.release
))
897 raise Exception, "Unknown build type: %s" % self
.type
899 return os
.path
.join(*path
)
902 def source_filename(self
):
903 return os
.path
.basename(self
.pkg
.path
)
906 def download_prefix(self
):
907 return "/".join((self
.pakfire
.settings
.get("download_baseurl"), "packages"))
910 def source_download(self
):
911 return "/".join((self
.download_prefix
, self
.pkg
.path
))
914 def source_hash_sha512(self
):
915 return self
.pkg
.hash_sha512
919 # XXX maybe this should rather live in a uimodule.
920 # zlib-1.2.3-2.ip3 [src, i686, blah...]
921 s
= """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
922 (self
.state
, self
.type, self
.uuid
, self
.name
)
925 for job
in self
.jobs
:
926 s_jobs
.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
927 (job
.state
, job
.type, job
.uuid
, job
.arch
))
930 s
+= " [%s]" % ", ".join(s_jobs
)
935 def supported_arches(self
):
936 return self
.pkg
.supported_arches
939 def critical_path(self
):
940 return self
.pkg
.critical_path
942 def get_jobs(self
, type=None):
944 Returns a list of jobs of this build.
946 return self
.pakfire
.jobs
.get_by_build(self
.id, self
, type=type)
951 Get a list of all build jobs that are in this build.
953 if self
._jobs
is None:
954 self
._jobs
= self
.get_jobs(type="build")
960 if self
._jobs
_test
is None:
961 self
._jobs
_test
= self
.get_jobs(type="test")
963 return self
._jobs
_test
966 def all_jobs_finished(self
):
969 for job
in self
.jobs
:
970 if not job
.state
== "finished":
976 def create_autojobs(self
, arches
=None, type="build"):
979 # Arches may be passed to this function. If not we use all arches
980 # this package supports.
982 arches
= self
.supported_arches
984 # Create a new job for every given archirecture.
985 for arch
in self
.pakfire
.arches
.expand(arches
):
986 # Don't create jobs for src.
987 if arch
.name
== "src":
990 job
= self
.add_job(arch
, type=type)
993 # Return all newly created jobs.
996 def add_job(self
, arch
, type="build"):
997 job
= Job
.create(self
.pakfire
, self
, arch
, type=type)
999 # Add new job to cache.
1001 self
._jobs
.append(job
)
1008 def update_id(self
):
1009 if not self
.type == "release":
1012 # Generate an update ID if none does exist, yet.
1013 self
.generate_update_id()
1016 "%s" % self
.distro
.name
.replace(" ", "").upper(),
1017 "%04d" % (self
.data
.update_year
or 0),
1018 "%04d" % (self
.data
.update_num
or 0),
1023 def generate_update_id(self
):
1024 if not self
.type == "release":
1027 if self
.data
.update_num
:
1030 update
= self
.db
.get("SELECT update_num AS num FROM builds \
1031 WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")
1034 update_num
= update
.num
+ 1
1038 self
.db
.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
1039 WHERE id = %s", update_num
, self
.id)
1043 def get_comments(self
, limit
=10, offset
=0):
1044 query
= "SELECT * FROM builds_comments \
1045 JOIN users ON builds_comments.user_id = users.id \
1046 WHERE build_id = %s ORDER BY time_created ASC"
1049 for comment
in self
.db
.query(query
, self
.id):
1050 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
1051 comments
.append(comment
)
1055 def add_comment(self
, user
, text
, credit
):
1056 # Add the new comment to the database.
1057 id = self
.db
.execute("INSERT INTO \
1058 builds_comments(build_id, user_id, text, credit, time_created) \
1059 VALUES(%s, %s, %s, %s, NOW())",
1060 self
.id, user
.id, text
, credit
)
1062 # Update the credit cache.
1063 if not self
._credits
is None:
1064 self
._credits
+= credit
1066 # Send the new comment to all watchers and stuff.
1067 self
.send_comment_message(id)
1069 # Return the ID of the newly created comment.
1075 if self
._credits
is None:
1076 # Get the sum of the credits from the database.
1077 query
= self
.db
.get(
1078 "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s",
1082 self
._credits
= query
.credits
or 0
1084 return self
._credits
1091 def get_commenters(self
):
1092 users
= self
.db
.query("SELECT DISTINCT users.id AS id FROM builds_comments \
1093 JOIN users ON builds_comments.user_id = users.id \
1094 WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \
1095 AND NOT users.activated = 'Y' ORDER BY users.id", self
.id)
1097 return [users
.User(self
.pakfire
, u
.id) for u
in users
]
1099 def send_comment_message(self
, comment_id
):
1100 comment
= self
.db
.get("SELECT * FROM builds_comments WHERE id = %s",
1104 assert comment
.build_id
== self
.id
1106 # Get user who wrote the comment.
1107 user
= self
.pakfire
.users
.get_by_id(comment
.user_id
)
1110 "build_name" : self
.name
,
1111 "user_name" : user
.realname
,
1114 # XXX create beautiful message
1116 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
1117 N_("%(user_name)s commented on %(build_name)s"),
1118 comment
.text
, format
)
1122 def get_log(self
, comments
=True, repo
=True, limit
=None):
1126 created_entry
= logs
.CreatedLogEntry(self
.pakfire
, self
)
1127 entries
.append(created_entry
)
1130 entries
+= self
.get_comments(limit
=limit
)
1133 entries
+= self
.get_repo_moves(limit
=limit
)
1135 # Sort all entries in chronological order.
1139 entries
= entries
[:limit
]
	def get_watchers(self):
		"""
			Return User objects for all users watching this build.

			Only live accounts are returned: deleted users and users
			that never activated their account are filtered out.
		"""
		query = self.db.query("SELECT DISTINCT users.id AS id FROM builds_watchers \
			JOIN users ON builds_watchers.user_id = users.id \
			WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \
			AND users.activated = 'Y' ORDER BY users.id", self.id)

		return [users.User(self.pakfire, u.id) for u in query]
	def add_watcher(self, user):
		"""
			Subscribe *user* to notifications about this build.
		"""
		# Don't add a user twice.
		if user in self.get_watchers():
			# NOTE(review): the body of this guard (presumably an early
			# return) is not visible in this excerpt.

		self.db.execute("INSERT INTO builds_watchers(build_id, user_id) \
			VALUES(%s, %s)", self.id, user.id)
1162 def message_recipients(self
):
1165 for watcher
in self
.get_watchers():
1166 ret
.append("%s <%s>" % (watcher
.realname
, watcher
.email
))
1172 if self
._update
is None:
1173 update
= self
.db
.get("SELECT update_id AS id FROM updates_builds \
1174 WHERE build_id = %s", self
.id)
1177 self
._update
= updates
.Update(self
.pakfire
, update
.id)
1183 if self
._repo
is None:
1184 repo
= self
.db
.get("SELECT repo_id AS id FROM repositories_builds \
1185 WHERE build_id = %s", self
.id)
1188 self
._repo
= repository
.Repository(self
.pakfire
, repo
.id)
1192 def get_repo_moves(self
, limit
=None):
1193 query
= "SELECT * FROM repositories_history \
1194 WHERE build_id = %s ORDER BY time ASC"
1197 for action
in self
.db
.query(query
, self
.id):
1198 action
= logs
.RepositoryLogEntry(self
.pakfire
, action
)
1199 actions
.append(action
)
1211 def repo_time(self
):
1212 repo
= self
.db
.get("SELECT time_added FROM repositories_builds \
1213 WHERE build_id = %s", self
.id)
1216 return repo
.time_added
1218 def get_auto_move(self
):
1219 return self
.data
.auto_move
== "Y"
1221 def set_auto_move(self
, state
):
1227 self
.db
.execute("UPDATE builds SET auto_move = %s WHERE id = %s", self
.id)
1229 self
._data
["auto_move"] = state
1231 auto_move
= property(get_auto_move
, set_auto_move
)
1234 def can_move_forward(self
):
1238 # If there is no next repository, we cannot move anything.
1239 if not self
.repo
.next
:
1242 # If the needed amount of score is reached, we can move forward.
1243 if self
.score
>= self
.repo
.next
.score_needed
:
1246 # If the repository does not require a minimal time,
1247 # we can move forward immediately.
1248 if not self
.repo
.time_min
:
1251 query
= self
.db
.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
1252 WHERE build_id = %s", self
.id)
1253 duration
= query
.duration
1255 if duration
>= self
.repo
.time_min
:
	def get_bug_ids(self):
		"""
			Return the list of bug IDs that are linked to this build.
		"""
		query = self.db.query("SELECT bug_id FROM builds_bugs \
			WHERE build_id = %s", self.id)

		return [b.bug_id for b in query]
1268 def add_bug(self
, bug_id
, user
=None, log
=True):
1269 # Check if this bug is already in the list of bugs.
1270 if bug_id
in self
.get_bug_ids():
1273 self
.db
.execute("INSERT INTO builds_bugs(build_id, bug_id) \
1274 VALUES(%s, %s)", self
.id, bug_id
)
1278 self
.log("bug_added", user
=user
, bug_id
=bug_id
)
1280 def rem_bug(self
, bug_id
, user
=None, log
=True):
1281 self
.db
.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \
1282 bug_id = %s", self
.id, bug_id
)
1286 self
.log("bug_removed", user
=user
, bug_id
=bug_id
)
1288 def search_for_bugs(self
):
1292 pattern
= re
.compile(r
"(bug\s?|#)(\d+)")
1294 for txt
in (self
.commit
.subject
, self
.commit
.message
):
1295 for bug
in re
.finditer(pattern
, txt
):
1297 bugid
= int(bug
.group(2))
1301 # Check if a bug with the given ID exists in BZ.
1302 bug
= self
.pakfire
.bugzilla
.get_bug(bugid
)
1310 for bug_id
in self
.get_bug_ids():
1311 bug
= self
.pakfire
.bugzilla
.get_bug(bug_id
)
1319 def _update_bugs_helper(self
, repo
):
1321 This function takes a new status and generates messages that
1322 are appended to all bugs.
1325 kwargs
= BUG_MESSAGES
[repo
.type].copy()
1329 baseurl
= self
.pakfire
.settings
.get("baseurl", "")
1331 "build_url" : "%s/build/%s" % (baseurl
, self
.uuid
),
1332 "distro_name" : self
.distro
.name
,
1333 "package_name" : self
.name
,
1334 "repo_name" : repo
.name
,
1336 kwargs
["comment"] = kwargs
["comment"] % args
1338 self
.update_bugs(**kwargs
)
	def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
		"""
			Queue an update (status/resolution/comment) for a single bug
			in the database. A separate worker presumably pushes these
			rows to the bug tracker — TODO confirm.
		"""
		self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
			VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment)
1344 def update_bugs(self
, status
, resolution
=None, comment
=None):
1345 # Update all bugs linked to this build.
1346 for bug_id
in self
.get_bug_ids():
1347 self
._update
_bug
(bug_id
, status
=status
, resolution
=resolution
, comment
=comment
)
1350 class Jobs(base
.Object
):
1351 def _get_job(self
, query
, *args
):
1352 res
= self
.db
.get(query
, *args
)
1355 return Job(self
.backend
, res
.id, data
=res
)
1357 def _get_jobs(self
, query
, *args
):
1358 res
= self
.db
.query(query
, *args
)
1361 yield Job(self
.backend
, row
.id, data
=row
)
1363 def create(self
, build
, arch
, type="build"):
1364 job
= self
._get
_job
("INSERT INTO jobs(uuid, type, build_id, arch, time_created) \
1365 VALUES(%s, %s, %s, %s, NOW()) RETURNING *", "%s" % uuid
.uuid4(), type, build
.id, arch
)
1368 # Set cache for Build object.
1371 # Jobs are by default in state "new" and wait for being checked
1372 # for dependencies. Packages that do have no build dependencies
1373 # can directly be forwarded to "pending" state.
1374 if not job
.pkg
.requires
:
1375 job
.state
= "pending"
1379 def get_by_id(self
, id, data
=None):
1380 return Job(self
.pakfire
, id, data
)
	def get_by_uuid(self, uuid):
		"""
			Look up a job by its UUID and return a Job object.

			NOTE(review): the not-found handling between the query and
			the return is not visible in this excerpt.
		"""
		job = self.db.get("SELECT id FROM jobs WHERE uuid = %s", uuid)

		return self.get_by_id(job.id)
1388 def get_by_build(self
, build_id
, build
=None, type=None):
1390 Get all jobs in the specifies build.
1392 query
= "SELECT * FROM jobs WHERE build_id = %s"
1396 query
+= " AND type = %s"
1399 # Get IDs of all builds in this group.
1401 for job
in self
.db
.query(query
, *args
):
1402 job
= Job(self
.pakfire
, job
.id, job
)
1404 # If the Build object was set, we set it so it won't be retrieved
1405 # from the database again.
1411 # Return sorted list of jobs.
1414 def get_active(self
, host_id
=None, builder
=None, states
=None):
1416 host_id
= builder
.id
1419 states
= ["dispatching", "running", "uploading"]
1421 query
= "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states
))
1425 query
+= " AND builder_id = %s" % host_id
1427 query
+= " ORDER BY \
1429 WHEN jobs.state = 'running' THEN 0 \
1430 WHEN jobs.state = 'uploading' THEN 1 \
1431 WHEN jobs.state = 'dispatching' THEN 2 \
1432 WHEN jobs.state = 'pending' THEN 3 \
1433 WHEN jobs.state = 'new' THEN 4 \
1434 END, time_started ASC"
1436 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
1438 def get_latest(self
, arch
=None, builder
=None, limit
=None, age
=None, date
=None):
1439 query
= "SELECT * FROM jobs"
1442 where
= ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]
1445 where
.append("arch = %s")
1449 where
.append("builder_id = %s")
1450 args
.append(builder
.id)
1454 year
, month
, day
= date
.split("-", 2)
1455 date
= datetime
.date(int(year
), int(month
), int(day
))
1459 where
.append("(DATE(time_created) = %s OR \
1460 DATE(time_started) = %s OR DATE(time_finished) = %s)")
1461 args
+= (date
, date
, date
)
1464 where
.append("time_finished >= NOW() - '%s'::interval" % age
)
1467 query
+= " WHERE %s" % " AND ".join(where
)
1469 query
+= " ORDER BY time_finished DESC"
1472 query
+= " LIMIT %s"
1475 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
1477 def get_average_build_time(self
):
1479 Returns the average build time of all finished builds from the
1482 result
= self
.db
.get("SELECT AVG(time_finished - time_started) as average \
1483 FROM jobs WHERE type = 'build' AND state = 'finished' AND \
1484 time_finished >= NOW() - '3 months'::interval")
1487 return result
.average
1489 def count(self
, *states
):
1490 query
= "SELECT COUNT(*) AS count FROM jobs"
1494 query
+= " WHERE state IN %s"
1497 jobs
= self
.db
.get(query
, *args
)
1502 class Job(base
.DataObject
):
1506 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.name
)
1508 def __eq__(self
, other
):
1509 if isinstance(other
, self
.__class
__):
1510 return self
.id == other
.id
1512 def __lt__(self
, other
):
1513 if isinstance(other
, self
.__class
__):
1514 if (self
.type, other
.type) == ("build", "test"):
1517 if self
.build
== other
.build
:
1518 return arches
.priority(self
.arch
) < arches
.priority(other
.arch
)
1520 return self
.time_created
< other
.time_created
1523 packages
= self
.backend
.packages
._get
_packages
("SELECT packages.* FROM jobs_packages \
1524 LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
1525 WHERE jobs_packages.job_id = %s ORDER BY packages.name", self
.id)
1527 return iter(packages
)
1529 def __nonzero__(self
):
1533 res
= self
.db
.get("SELECT COUNT(*) AS len FROM jobs_packages \
1534 WHERE job_id = %s", self
.id)
1540 return self
.build
.distro
1543 self
.__delete
_buildroots
()
1544 self
.__delete
_history
()
1545 self
.__delete
_packages
()
1546 self
.__delete
_logfiles
()
1548 # Delete the job itself.
1549 self
.db
.execute("DELETE FROM jobs WHERE id = %s", self
.id)
def __delete_buildroots(self):
	"""
	Drop every buildroot row that was recorded for this job.
	"""
	sql = "DELETE FROM jobs_buildroots WHERE job_id = %s"
	self.db.execute(sql, self.id)
def __delete_history(self):
	"""
	Remove every history entry that references this build job.
	"""
	sql = "DELETE FROM jobs_history WHERE job_id = %s"
	self.db.execute(sql, self.id)
# Deletes all uploaded package files of this job and unlinks them from the
# jobs_packages table. NOTE(review): the per-package body of the loop
# (presumably pkg.delete()) is missing from this view.
1563 def __delete_packages(self
):
1565 Deletes all uploaded files from the job.
1567 for pkg
in self
.packages
:
1570 self
.db
.execute("DELETE FROM jobs_packages WHERE job_id = %s", self
.id)
def __delete_logfiles(self):
	"""
	Queue every log file of this job for removal from disk.

	The files are not deleted here; a cleanup task later consumes
	the queue_delete table.
	"""
	enqueue = self.db.execute
	for entry in self.logfiles:
		enqueue("INSERT INTO queue_delete(path) VALUES(%s)", entry.path)
# Reset the job back to a pristine state: drop buildroots, uploaded
# packages, history and log files, then record the reset in the log.
# NOTE(review): lines between the deletions and the log call (presumably
# resetting self.state) are missing from this view — confirm.
1576 def reset(self
, user
=None):
1577 self
.__delete
_buildroots
()
1578 self
.__delete
_packages
()
1579 self
.__delete
_history
()
1580 self
.__delete
_logfiles
()
1583 self
.log("reset", user
=user
)
# Append an entry to the jobs_history table for this job.
# NOTE(review): the lines deriving user_id and the None-guards around
# builder/test_job are missing from this view; user_id is referenced in the
# INSERT below but its assignment is not visible — confirm against the
# full source.
1587 def log(self
, action
, user
=None, state
=None, builder
=None, test_job
=None):
1594 builder_id
= builder
.id
1598 test_job_id
= test_job
.id
1600 self
.db
.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
1601 time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
1602 self
.id, action
, state
, user_id
, builder_id
, test_job_id
)
# Fetch history entries of this job, optionally filtered by user and
# paginated via limit/offset. NOTE(review): the initialisation of "args"
# and "entries", the limit/offset guards and the final return are missing
# from this view.
1604 def get_log(self
, limit
=None, offset
=None, user
=None):
1605 query
= "SELECT * FROM jobs_history"
1607 conditions
= ["job_id = %s",]
# Presumably guarded by "if user:" (line not visible).
1611 conditions
.append("user_id = %s")
1612 args
.append(user
.id)
1615 query
+= " WHERE %s" % " AND ".join(conditions
)
1617 query
+= " ORDER BY time DESC"
# Branch with both offset and limit set.
1621 query
+= " LIMIT %s,%s"
1622 args
+= [offset
, limit
,]
# Branch with only a limit.
1624 query
+= " LIMIT %s"
# Wrap each raw row into a JobLogEntry object.
1628 for entry
in self
.db
.query(query
, *args
):
1629 entry
= logs
.JobLogEntry(self
.pakfire
, entry
)
1630 entries
.append(entry
)
# NOTE(review): property decorators and def lines are missing from this
# view; these are the bodies of the uuid, type, build_id and build
# accessors, all reading the cached database row (self.data).
1636 return self
.data
.uuid
1640 return self
.data
.type
1644 return self
.data
.build_id
# Resolve the full Build object from its id.
1648 return self
.pakfire
.builds
.get_by_id(self
.build_id
)
# Collect the other jobs that belong to the same build.
# NOTE(review): the loop body (presumably skipping self and appending the
# rest) and the return statement are missing from this view.
1651 def related_jobs(self
):
1654 for job
in self
.build
.jobs
:
# NOTE(review): property headers are missing from this view; these are the
# bodies of the pkg, name and size accessors.
# The package this job builds, taken from the parent build.
1664 return self
.build
.pkg
# Human-readable job name: <pkgname>-<version>.<arch>.
1668 return "%s-%s.%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
, self
.arch
)
# Total size of all uploaded packages of this job.
1672 return sum((p
.size
for p
in self
.packages
))
# NOTE(review): the def line and return statement are missing from this
# view. Looks up the position of a pending job in the jobs_queue view.
1677 Returns the rank in the build queue
# Only pending jobs have a queue rank.
1679 if not self
.state
== "pending":
1682 res
= self
.db
.get("SELECT rank FROM jobs_queue WHERE job_id = %s", self
.id)
def is_running(self):
	"""
	Tell whether this job is currently in one of the active states.
	"""
	active_states = ("pending", "dispatching", "running", "uploading")
	return self.state in active_states
def get_state(self):
	"""Read the job's state straight from the cached database row."""
	row = self.data
	return row.state
# State transition handler: persists the new state, logs the change,
# updates timestamps depending on the target state and notifies users.
# NOTE(review): several guard/blank lines are missing from this view
# (e.g. around the log call and before send_failed_message) — confirm
# exact nesting against the full source before editing.
1696 def set_state(self
, state
, user
=None, log
=True):
1697 # Nothing to do if the state remains.
1698 if not self
.state
== state
:
1699 self
.db
.execute("UPDATE jobs SET state = %s WHERE id = %s", state
, self
.id)
# State changes to "new" are not logged.
1702 if log
and not state
== "new":
1703 self
.log("state_change", state
=state
, user
=user
)
# Keep the cached row in sync with the database.
1707 self
._data
["state"] = state
1709 # Always clear the message when the status is changed.
1710 self
.update_message(None)
1712 # Update some more informations.
1713 if state
== "dispatching":
1715 self
.db
.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
1716 WHERE id = %s", self
.id)
# Re-queued jobs get another try and cleared timestamps.
1718 elif state
== "pending":
1719 self
.db
.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
1720 time_finished = NULL WHERE id = %s", self
.id)
1722 elif state
in ("aborted", "dependency_error", "finished", "failed"):
1723 # Set finish time and reset builder..
1724 self
.db
.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self
.id)
1726 # Send messages to the user.
1727 if state
== "finished":
1728 self
.send_finished_message()
1730 elif state
== "failed":
1731 # Remove all package files if a job is set to failed state.
1732 self
.__delete
_packages
()
1734 self
.send_failed_message()
1736 # Automatically update the state of the build (not on test builds).
1737 if self
.type == "build":
1738 self
.build
.auto_update_state()
# Class-level property combining the getter and setter above.
1740 state
= property(get_state
, set_state
)
# NOTE(review): the "message" property header is missing from this view.
1744 return self
.data
.message
# Persist a new status message for the job and mirror it in the cache.
# NOTE(review): the argument lines of the execute call (msg, self.id) are
# missing from this view.
1746 def update_message(self
, msg
):
1747 self
.db
.execute("UPDATE jobs SET message = %s WHERE id = %s",
1751 self
._data
["message"] = msg
def get_builder(self):
	"""
	Resolve the builder that is assigned to this job.

	Returns None when no builder has been assigned yet.
	"""
	builder_id = self.data.builder_id
	if not builder_id:
		return None

	return self.backend.builders.get_by_id(builder_id)
# Assign a builder to this job: persist the id, refresh the caches and
# log the assignment. NOTE(review): intermediate lines (e.g. a guard
# around the log call) are missing from this view.
1757 def set_builder(self
, builder
, user
=None):
1758 self
.db
.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
1759 builder
.id, self
.id)
# Keep the cached database row in sync.
1763 self
._data
["builder_id"] = builder
.id
# Cache the resolved builder object itself.
1765 self
._builder
= builder
1769 self
.log("builder_assigned", builder
=builder
, user
=user
)
# Class-level lazy property combining getter and setter.
1771 builder
= lazy_property(get_builder
, set_builder
)
# NOTE(review): property headers are missing from this view.
# Body of the arch accessor.
1775 return self
.data
.arch
# Body of the duration accessor: elapsed seconds since start, using the
# finish time when available, otherwise the current UTC time. The early
# return for unstarted jobs is not visible here.
1779 if not self
.time_started
:
1782 if self
.time_finished
:
1783 delta
= self
.time_finished
- self
.time_started
# Job still running: measure against now (naive UTC, matching DB values
# presumably — confirm).
1785 delta
= datetime
.datetime
.utcnow() - self
.time_started
1787 return delta
.total_seconds()
def time_created(self):
	"""Creation timestamp of this job, as stored in the jobs table."""
	row = self.data
	return row.time_created
def time_started(self):
	"""Timestamp when this job was started, or None if never started."""
	row = self.data
	return row.time_started
def time_finished(self):
	"""Timestamp when this job finished, or None while still running."""
	row = self.data
	return row.time_finished
# Estimate how long this job should take based on historical build times
# for the same architecture. NOTE(review): the trailing arguments of the
# get_build_times_by_arch call and the "no data" early return are missing
# from this view.
1802 def expected_runtime(self
):
1804 Returns the estimated time and stddev, this job takes to finish.
1806 # Get the average build time.
1807 build_times
= self
.pakfire
.builds
.get_build_times_by_arch(self
.arch
,
1810 # If there is no statistical data, we cannot estimate anything.
1814 return build_times
.average
, build_times
.stddev
# Body of the eta accessor (header missing): remaining seconds plus the
# standard deviation of the estimate.
1818 expected_runtime
, stddev
= self
.expected_runtime
1820 if expected_runtime
:
1821 return expected_runtime
- int(self
.duration
), stddev
# Body of the tries accessor (header missing).
1825 return self
.data
.tries
# Look up one of this job's packages by its UUID via the jobs_packages
# link table. NOTE(review): the query arguments (self.id, uuid) and the
# return handling are missing from this view.
1827 def get_pkg_by_uuid(self
, uuid
):
1828 pkg
= self
.backend
.packages
._get
_package
("SELECT packages.id FROM packages \
1829 JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
1830 WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
# NOTE(review): the enclosing def/property header and the initialisation
# of "logfiles" are missing from this view. Wraps each logfiles row into
# a logs.LogFile object and collects them.
1841 for log
in self
.db
.query("SELECT id FROM logfiles WHERE job_id = %s", self
.id):
1842 log
= logs
.LogFile(self
.pakfire
, log
.id)
1845 logfiles
.append(log
)
# Dispatch an uploaded file to the right handler: log files go to
# _add_file_log, package files to _add_file_package. NOTE(review): the
# body of the test-build guard (presumably raising an exception) is
# missing from this view.
1849 def add_file(self
, filename
):
1851 Add the specified file to this job.
1853 The file is copied to the right directory by this function.
1855 assert os
.path
.exists(filename
)
1857 if filename
.endswith(".log"):
1858 self
._add
_file
_log
(filename
)
1860 elif filename
.endswith(".%s" % PACKAGE_EXTENSION
):
1861 # It is not allowed to upload packages on test builds.
1862 if self
.type == "test":
1865 self
._add
_file
_package
(filename
)
# Attach a log file: pick a unique target name under <build>/logs,
# hash the file with SHA512, copy it there and register it in the
# logfiles table. NOTE(review): the loop scaffolding around the unique-name
# search (the counter "i" is referenced but never assigned in this view)
# and around the chunked read are missing — confirm against full source.
1867 def _add_file_log(self
, filename
):
1869 Attach a log file to this job.
1871 target_dirname
= os
.path
.join(self
.build
.path
, "logs")
# Test jobs get a "test.<arch>.<i>.<tries>.log" name...
1873 if self
.type == "test":
1876 target_filename
= os
.path
.join(target_dirname
,
1877 "test.%s.%s.%s.log" % (self
.arch
, i
, self
.tries
))
# ...retrying with another counter value while the name is taken.
1879 if os
.path
.exists(target_filename
):
# ...regular builds use "build.<arch>.<tries>.log".
1884 target_filename
= os
.path
.join(target_dirname
,
1885 "build.%s.%s.log" % (self
.arch
, self
.tries
))
1887 # Make sure the target directory exists.
1888 if not os
.path
.exists(target_dirname
):
1889 os
.makedirs(target_dirname
)
1891 # Calculate a SHA512 hash from that file.
1892 f
= open(filename
, "rb")
1893 h
= hashlib
.sha512()
# Chunked read; loop/termination lines are not visible here.
1895 buf
= f
.read(BUFFER_SIZE
)
1902 # Copy the file to the final location.
1903 shutil
.copy2(filename
, target_filename
)
1905 # Create an entry in the database.
1906 self
.db
.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
1907 VALUES(%s, %s, %s, %s)", self
.id, os
.path
.relpath(target_filename
, PACKAGES_DIR
),
1908 os
.path
.getsize(target_filename
), h
.hexdigest())
# Register an uploaded package file: create the DB entry, move the file
# into the build's arch directory and link it to this job.
# NOTE(review): the argument line of the final INSERT (presumably
# self.id, pkg.id) is missing from this view.
1910 def _add_file_package(self
, filename
):
1911 # Open package (creates entry in the database).
1912 pkg
= packages
.Package
.open(self
.pakfire
, filename
)
1914 # Move package to the build directory.
1915 pkg
.move(os
.path
.join(self
.build
.path
, self
.arch
))
1917 # Attach the package to this job.
1918 self
.db
.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
def get_aborted_state(self):
	"""State the job was in right before it was aborted."""
	row = self.data
	return row.aborted_state
def set_aborted_state(self, state):
	"""
	Remember which state the job was in when it was aborted.

	Persistence is delegated to the generic attribute setter.
	"""
	self._set_attribute("aborted_state", state)
# Class-level property combining the aborted-state getter and setter.
1927 aborted_state
= property(get_aborted_state
, set_aborted_state
)
# Collect mail recipients for job notifications: build watchers, plus the
# package maintainer for release builds, or the submitting user for
# scratch builds. NOTE(review): the initialisation of "l" and the final
# return are missing from this view.
1930 def message_recipients(self
):
1933 # Add all people watching the build.
1934 l
+= self
.build
.message_recipients
1936 # Add the package maintainer on release builds.
1937 if self
.build
.type == "release":
1938 maint
= self
.pkg
.maintainer
# Only maintainers resolved to real user accounts get an address built.
1940 if isinstance(maint
, users
.User
):
1941 l
.append("%s <%s>" % (maint
.realname
, maint
.email
))
1945 # XXX add committer and commit author.
1947 # Add the owner of the scratch build on scratch builds.
1948 elif self
.build
.type == "scratch" and self
.build
.user
:
1949 l
.append("%s <%s>" % \
1950 (self
.build
.user
.realname
, self
.build
.user
.email
))
# Store the buildroot (list of (name, uuid) package pairs) for the current
# try of this job, replacing any previous record for the same try.
# NOTE(review): the initialisation of "rows" is missing from this view.
1954 def save_buildroot(self
, pkgs
):
1957 for pkg_name
, pkg_uuid
in pkgs
:
1958 rows
.append((self
.id, self
.tries
, pkg_uuid
, pkg_name
))
1960 # Cleanup old stuff first (for rebuilding packages).
1961 self
.db
.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s",
1962 self
.id, self
.tries
)
1964 self
.db
.executemany("INSERT INTO \
1965 jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \
1966 VALUES(%s, %s, %s, %s)", rows
)
# Check whether a buildroot was recorded for the given try (defaulting,
# presumably, to the current try — the default handling, the query
# arguments and the return are missing from this view).
1968 def has_buildroot(self
, tries
=None):
1972 res
= self
.db
.get("SELECT COUNT(*) AS num FROM jobs_buildroots \
1973 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s",
# Fetch the recorded buildroot for a try, resolving each entry to a
# Package object when it exists in the packages table.
# NOTE(review): the tries default, the initialisation of "pkgs", the
# "for row in rows:" line and the return are missing from this view.
1981 def get_buildroot(self
, tries
=None):
1985 rows
= self
.db
.query("SELECT * FROM jobs_buildroots \
1986 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
1987 ORDER BY pkg_name", self
.id, tries
)
1991 # Search for this package in the packages table.
1992 pkg
= self
.pakfire
.packages
.get_by_uuid(row
.pkg_uuid
)
1993 pkgs
.append((row
.pkg_name
, row
.pkg_uuid
, pkg
))
# Notify all recipients that the job finished successfully. Test jobs are
# silent. NOTE(review): the early return for test jobs and the dict
# literal braces around the info entries are missing from this view.
1997 def send_finished_message(self
):
1998 # Send no finished mails for test jobs.
1999 if self
.type == "test":
2002 logging
.debug("Sending finished message for job %s to %s" % \
2003 (self
.name
, ", ".join(self
.message_recipients
)))
# Template variables for the message body.
2006 "build_name" : self
.name
,
2007 "build_host" : self
.builder
.name
,
2008 "build_uuid" : self
.uuid
,
2011 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
2012 MSG_BUILD_FINISHED_SUBJECT
, MSG_BUILD_FINISHED
, info
)
# Notify all recipients that the job failed. NOTE(review): the fallback
# initialisation of build_host (for jobs without a builder) and the dict
# literal braces around the info entries are missing from this view.
2014 def send_failed_message(self
):
2015 logging
.debug("Sending failed message for job %s to %s" % \
2016 (self
.name
, ", ".join(self
.message_recipients
)))
# Presumably guarded by "if self.builder:" (line not visible).
2020 build_host
= self
.builder
.name
# Template variables for the message body.
2023 "build_name" : self
.name
,
2024 "build_host" : build_host
,
2025 "build_uuid" : self
.uuid
,
2028 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
2029 MSG_BUILD_FAILED_SUBJECT
, MSG_BUILD_FAILED
, info
)
# Delay the job so it does not start before NOW() + start_time.
# NOTE(review): the body of the None guard (presumably an early return)
# is missing from this view.
2031 def set_start_time(self
, start_time
):
2032 if start_time
is None:
2035 self
.db
.execute("UPDATE jobs SET start_not_before = NOW() + %s \
2036 WHERE id = %s LIMIT 1", start_time
, self
.id)
# Schedule either a rebuild of this job or a new test job for it.
# Rebuilds reset the job to "new"; tests clone the job with type="test".
# NOTE(review): the bodies of the two state guards (finished jobs cannot
# be rebuilt, only finished jobs can be tested — presumably raising or
# returning) and the return of the new test job are missing from this view.
2038 def schedule(self
, type, start_time
=None, user
=None):
2039 assert type in ("rebuild", "test")
2041 if type == "rebuild":
2042 if self
.state
== "finished":
2045 self
.set_state("new", user
=user
, log
=False)
2046 self
.set_start_time(start_time
)
2049 self
.log("schedule_rebuild", user
=user
)
2051 elif type == "test":
2052 if not self
.state
== "finished":
2055 # Create a new job with same build and arch.
2056 job
= self
.create(self
.pakfire
, self
.build
, self
.arch
, type="test")
2057 job
.set_start_time(start_time
)
2060 self
.log("schedule_test_job", test_job
=job
, user
=user
)
def schedule_test(self, start_not_before=None, user=None):
	"""
	Convenience wrapper that schedules a test job for this job.
	"""
	kwargs = {"start_time": start_not_before, "user": user}
	return self.schedule("test", **kwargs)
def schedule_rebuild(self, start_not_before=None, user=None):
	"""
	Convenience wrapper that schedules a rebuild of this job.
	"""
	kwargs = {"start_time": start_not_before, "user": user}
	return self.schedule("rebuild", **kwargs)
# Determine the repositories the builder should use for this job: the
# explicitly assigned ones from jobs_repos, falling back to the distro's
# default build repositories. NOTE(review): the query argument (self.id),
# the "no assignments" guard, the initialisation of "repos" and the
# append inside the loop are missing from this view.
2072 def get_build_repos(self
):
2074 Returns a list of all repositories that should be used when
2077 repo_ids
= self
.db
.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",
# Fallback when no repositories were explicitly assigned.
2081 return self
.distro
.get_build_repos()
2084 for repo
in self
.distro
.repositories
:
2085 if repo
.id in [r
.id for r
in repo_ids
]:
2088 return repos
or self
.distro
.get_build_repos()
# Concatenate the configuration snippets of all build repositories into
# the repo configuration sent to the builder. NOTE(review): the
# initialisation of "confs" is missing from this view.
2090 def get_repo_config(self
):
2092 Get repository configuration file that is sent to the builder.
2096 for repo
in self
.get_build_repos():
2097 confs
.append(repo
.get_conf())
2099 return "\n\n".join(confs
)
# Build the full configuration sent to the builder: distro configuration
# followed by the repository configuration. NOTE(review): the
# initialisation of "confs" is missing from this view.
2101 def get_config(self
):
2103 Get configuration file that is sent to the builder.
2107 # Add the distribution configuration.
2108 confs
.append(self
.distro
.get_config())
2110 # Then add all repositories for this build.
2111 confs
.append(self
.get_repo_config())
2113 return "\n\n".join(confs
)
# Try to resolve the build dependencies of this job's source package on
# the hub and update the job state accordingly. Uses Python 2 "except X, e"
# syntax — do not port in isolation. NOTE(review): this method reaches the
# end of the visible chunk and may continue beyond it (e.g. handling of a
# non-True solver status); several blank/guard lines are missing.
2115 def resolvdep(self
):
2116 config
= pakfire
.config
.Config(files
=["general.conf"])
2117 config
.parse(self
.get_config())
2119 # The filename of the source file.
2120 filename
= os
.path
.join(PACKAGES_DIR
, self
.build
.pkg
.path
)
2121 assert os
.path
.exists(filename
), filename
2123 # Create a new pakfire instance with the configuration for
2125 p
= pakfire
.PakfireServer(config
=config
, arch
=self
.arch
)
2127 # Try to solve the build dependencies.
2129 solver
= p
.resolvdep(filename
)
2131 # Catch dependency errors and log the problem string.
2132 except DependencyError
, e
:
2133 self
.state
= "dependency_error"
2134 self
.update_message(e
)
2137 # If the build dependencies can be resolved, we set the build in
2139 if solver
.status
is True:
# Do not resurrect jobs that were already marked as failed.
2140 if self
.state
in ("failed",):
2143 self
.state
= "pending"