13 import pakfire
.packages
16 from . import builders
18 from . import packages
19 from . import repository
23 from .constants
import *
24 from .decorators
import *
26 def import_from_package(_pakfire
, filename
, distro
=None, commit
=None, type="release",
27 arches
=None, check_for_duplicates
=True, owner
=None):
30 distro
= commit
.source
.distro
34 # Open the package file to read some basic information.
35 pkg
= pakfire
.packages
.open(None, None, filename
)
37 if check_for_duplicates
:
38 if distro
.has_package(pkg
.name
, pkg
.epoch
, pkg
.version
, pkg
.release
):
39 logging
.warning("Duplicate package detected: %s. Skipping." % pkg
)
42 # Open the package and add it to the database.
43 pkg
= packages
.Package
.open(_pakfire
, filename
)
44 logging
.debug("Created new package: %s" % pkg
)
46 # Associate the package to the processed commit.
50 # Create a new build object from the package which
51 # is always a release build.
52 build
= Build
.create(_pakfire
, pkg
, type=type, owner
=owner
, distro
=distro
)
53 logging
.debug("Created new build job: %s" % build
)
55 # Create all automatic jobs.
56 build
.create_autojobs(arches
=arches
)
61 class Builds(base
.Object
):
def get_by_id(self, id, data=None):
	"""
	Return the Build object for the given database ID.

	data may carry an already-fetched database row so the Build
	object does not need to query it again.
	"""
	build = Build(self.pakfire, id, data=data)

	return build
65 def get_by_uuid(self
, uuid
):
66 build
= self
.db
.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid
)
69 return self
.get_by_id(build
.id)
71 def get_all(self
, limit
=50):
72 query
= "SELECT * FROM builds ORDER BY time_created DESC"
75 query
+= " LIMIT %d" % limit
77 return [self
.get_by_id(b
.id, b
) for b
in self
.db
.query(query
)]
79 def get_by_user(self
, user
, type=None, public
=None):
83 if not type or type == "scratch":
84 # On scratch builds the user id equals the owner id.
85 conditions
.append("(builds.type = 'scratch' AND owner_id = %s)")
88 elif not type or type == "release":
92 conditions
.append("public = 'Y'")
94 conditions
.append("public = 'N'")
96 query
= "SELECT builds.* AS id FROM builds \
97 JOIN packages ON builds.pkg_id = packages.id"
100 query
+= " WHERE %s" % " AND ".join(conditions
)
102 query
+= " ORDER BY builds.time_created DESC"
105 for build
in self
.db
.query(query
, *args
):
106 build
= Build(self
.pakfire
, build
.id, build
)
111 def get_by_name(self
, name
, type=None, public
=None, user
=None, limit
=None, offset
=None):
114 "packages.name = %s",
118 conditions
.append("builds.type = %s")
123 or_conditions
.append("public = 'Y'")
124 elif public
is False:
125 or_conditions
.append("public = 'N'")
127 if user
and not user
.is_admin():
128 or_conditions
.append("builds.owner_id = %s")
131 query
= "SELECT builds.* AS id FROM builds \
132 JOIN packages ON builds.pkg_id = packages.id"
135 conditions
.append(" OR ".join(or_conditions
))
138 query
+= " WHERE %s" % " AND ".join(conditions
)
140 if type == "release":
141 query
+= " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"
142 elif type == "scratch":
143 query
+= " ORDER BY time_created DESC"
147 query
+= " LIMIT %s,%s"
148 args
.extend([offset
, limit
])
153 return [Build(self
.pakfire
, b
.id, b
) for b
in self
.db
.query(query
, *args
)]
155 def get_latest_by_name(self
, name
, type=None, public
=None):
157 SELECT * FROM builds \
158 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
159 WHERE builds_latest.package_name = %s"
163 query
+= " AND builds_latest.build_type = %s"
167 query
+= " AND builds.public = %s"
169 elif public
is False:
170 query
+= " AND builds.public = %s"
173 # Get the last one only.
174 # Prefer release builds over scratch builds.
177 CASE builds.type WHEN 'release' THEN 0 ELSE 1 END, \
178 builds.time_created DESC \
181 res
= self
.db
.get(query
, *args
)
184 return Build(self
.pakfire
, res
.id, res
)
186 def get_active_builds(self
, name
, public
=None):
188 SELECT * FROM builds \
189 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
190 WHERE builds_latest.package_name = %s AND builds.type = %s"
191 args
= [name
, "release"]
194 query
+= " AND builds.public = %s"
196 elif public
is False:
197 query
+= " AND builds.public = %s"
201 for row
in self
.db
.query(query
, *args
):
202 b
= Build(self
.pakfire
, row
.id, row
)
205 # Sort the result. Lastest build first.
206 builds
.sort(reverse
=True)
211 builds
= self
.db
.get("SELECT COUNT(*) AS count FROM builds")
215 def needs_test(self
, threshold
, arch
, limit
=None, randomize
=False):
216 query
= "SELECT id FROM builds \
218 (SELECT * FROM jobs WHERE \
219 jobs.build_id = builds.id AND \
221 (jobs.state != 'finished' OR \
222 jobs.time_finished >= %s) \
225 (SELECT * FROM jobs WHERE \
226 jobs.build_id = builds.id AND \
228 jobs.type = 'build' AND \
229 jobs.state = 'finished' AND \
230 jobs.time_finished < %s \
232 AND builds.type = 'release' \
233 AND (builds.state = 'stable' OR builds.state = 'testing')"
234 args
= [arch
, threshold
, arch
, threshold
]
237 query
+= " ORDER BY RAND()"
243 return [Build(self
.pakfire
, b
.id) for b
in self
.db
.query(query
, *args
)]
245 def get_obsolete(self
, repo
=None):
247 Get all obsoleted builds.
249 If repo is True: which are in any repository.
250 If repo is some Repository object: which are in this repository.
255 query
= "SELECT id FROM builds WHERE state = 'obsolete'"
258 query
= "SELECT build_id AS id FROM repositories_builds \
259 JOIN builds ON builds.id = repositories_builds.build_id \
260 WHERE builds.state = 'obsolete'"
262 if repo
and not repo
is True:
263 query
+= " AND repositories_builds.repo_id = %s"
266 res
= self
.db
.query(query
, *args
)
270 build
= Build(self
.pakfire
, build
.id)
275 def get_changelog(self
, name
, public
=None, limit
=5, offset
=0):
276 query
= "SELECT builds.* FROM builds \
277 JOIN packages ON builds.pkg_id = packages.id \
282 args
= ["release", name
,]
285 query
+= " AND builds.public = %s"
287 elif public
== False:
288 query
+= " AND builds.public = %s"
291 query
+= " ORDER BY builds.time_created DESC"
295 query
+= " LIMIT %s,%s"
296 args
+= [offset
, limit
]
302 for b
in self
.db
.query(query
, *args
):
303 b
= Build(self
.pakfire
, b
.id, b
)
306 builds
.sort(reverse
=True)
310 def get_comments(self
, limit
=10, offset
=None, user
=None):
311 query
= "SELECT * FROM builds_comments \
312 JOIN users ON builds_comments.user_id = users.id"
317 wheres
.append("users.id = %s")
321 query
+= " WHERE %s" % " AND ".join(wheres
)
324 query
+= " ORDER BY time_created DESC"
329 query
+= " LIMIT %s,%s"
337 for comment
in self
.db
.query(query
, *args
):
338 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
339 comments
.append(comment
)
343 def get_build_times_summary(self
, name
=None, job_type
=None, arch
=None):
346 builds_times.arch AS arch, \
347 MAX(duration) AS maximum, \
348 MIN(duration) AS minimum, \
349 AVG(duration) AS average, \
350 SUM(duration) AS sum, \
351 STDDEV_POP(duration) AS stddev \
353 LEFT JOIN builds ON builds_times.build_id = builds.id \
354 LEFT JOIN packages ON builds.pkg_id = packages.id"
361 conditions
.append("packages.name = %s")
364 # Filter by job types.
366 conditions
.append("builds_times.job_type = %s")
367 args
.append(job_type
)
371 conditions
.append("builds_times.arch = %s")
376 query
+= " WHERE %s" % " AND ".join(conditions
)
378 # Grouping and sorting.
379 query
+= " GROUP BY arch ORDER BY arch DESC"
381 return self
.db
.query(query
, *args
)
383 def get_build_times_by_arch(self
, arch
, **kwargs
):
388 build_times
= self
.get_build_times_summary(**kwargs
)
390 return build_times
[0]
392 def get_types_stats(self
):
393 res
= self
.db
.query("SELECT type, COUNT(*) AS count FROM builds GROUP BY type")
400 ret
[row
.type] = row
.count
405 class Build(base
.Object
):
406 def __init__(self
, pakfire
, id, data
=None):
407 base
.Object
.__init
__(self
, pakfire
)
415 self
._jobs
_test
= None
416 self
._depends
_on
= None
425 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.pkg
)
427 def __cmp__(self
, other
):
431 return cmp(self
.pkg
, other
.pkg
)
434 def create(cls
, pakfire
, pkg
, type="release", owner
=None, distro
=None, public
=True):
435 assert type in ("release", "scratch", "test")
436 assert distro
, "You need to specify the distribution of this build."
443 # Check if scratch build has an owner.
444 if type == "scratch" and not owner
:
445 raise Exception, "Scratch builds require an owner"
447 # Set the default priority of this build.
448 if type == "release":
451 elif type == "scratch":
457 id = pakfire
.db
.execute("""
458 INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
459 VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid
.uuid4(), pkg
.id,
460 type, distro
.id, public
, priority
)
462 # Set the owner of this buildgroup.
464 pakfire
.db
.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
467 build
= cls(pakfire
, id)
469 # Log that the build has been created.
470 build
.log("created", user
=owner
)
472 # Create directory where the files live.
473 if not os
.path
.exists(build
.path
):
474 os
.makedirs(build
.path
)
476 # Move package file to the directory of the build.
477 source_path
= os
.path
.join(build
.path
, "src")
478 build
.pkg
.move(source_path
)
480 # Generate an update id.
481 build
.generate_update_id()
483 # Obsolete all other builds with the same name to track updates.
484 build
.obsolete_others()
486 # Search for possible bug IDs in the commit message.
487 build
.search_for_bugs()
493 Deletes this build including all jobs, packages and the source
496 # If the build is in a repository, we need to remove it.
498 self
.repo
.rem_build(self
)
500 for job
in self
.jobs
+ self
.test_jobs
:
506 # Delete everything related to this build.
508 self
.__delete
_comments
()
509 self
.__delete
_history
()
510 self
.__delete
_watchers
()
512 # Delete the build itself.
513 self
.db
.execute("DELETE FROM builds WHERE id = %s", self
.id)
def __delete_bugs(self):
	"""
	Delete all associated bugs.
	"""
	sql = "DELETE FROM builds_bugs WHERE build_id = %s"

	self.db.execute(sql, self.id)
def __delete_comments(self):
	"""
	Delete all comments that belong to this build.
	"""
	sql = "DELETE FROM builds_comments WHERE build_id = %s"

	self.db.execute(sql, self.id)
def __delete_history(self):
	"""
	Delete the repository history.
	"""
	sql = "DELETE FROM repositories_history WHERE build_id = %s"

	self.db.execute(sql, self.id)
def __delete_watchers(self):
	"""
	Delete all watchers of this build.
	"""
	sql = "DELETE FROM builds_watchers WHERE build_id = %s"

	self.db.execute(sql, self.id)
541 Resets the whole build so it can start again (as it has never
544 for job
in self
.jobs
:
547 #self.__delete_bugs()
548 self
.__delete
_comments
()
549 self
.__delete
_history
()
550 self
.__delete
_watchers
()
552 self
.state
= "building"
559 Lazy fetching of data for this object.
561 if self
._data
is None:
562 self
._data
= self
.db
.get("SELECT * FROM builds WHERE id = %s", self
.id)
570 A set of information that is sent to the XMLRPC client.
572 return { "uuid" : self
.uuid
}
574 def log(self
, action
, user
=None, bug_id
=None):
579 self
.db
.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
580 VALUES(%s, %s, %s, NOW(), %s)", self
.id, action
, user_id
, bug_id
)
585 The UUID of this build.
587 return self
.data
.uuid
592 Get package that is to be built in the build.
594 if self
._pkg
is None:
595 self
._pkg
= packages
.Package(self
.pakfire
, self
.data
.pkg_id
)
601 return "%s-%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
)
606 The type of this build.
608 return self
.data
.type
613 The ID of the owner of this build.
615 return self
.data
.owner_id
620 The owner of this build.
622 if not self
.owner_id
:
625 if self
._owner
is None:
626 self
._owner
= self
.pakfire
.users
.get_by_id(self
.owner_id
)
633 return self
.data
.distro_id
637 if self
._distro
is None:
638 self
._distro
= self
.pakfire
.distros
.get_by_id(self
.distro_id
)
645 if self
.type == "scratch":
648 def get_depends_on(self
):
649 if self
.data
.depends_on
and self
._depends
_on
is None:
650 self
._depends
_on
= Build(self
.pakfire
, self
.data
.depends_on
)
652 return self
._depends
_on
654 def set_depends_on(self
, build
):
655 self
.db
.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
659 self
._depends
_on
= build
660 self
._data
["depends_on"] = build
.id
662 depends_on
= property(get_depends_on
, set_depends_on
)
666 return self
.data
.time_created
670 return self
.created
.date()
675 Is this build public?
677 return self
.data
.public
== "Y"
682 Returns the size on disk of this build.
686 # Add the source package.
691 s
+= sum((j
.size
for j
in self
.jobs
))
697 # # Cache all states.
698 # states = [j.state for j in self.jobs]
700 # target_state = "unknown"
702 # # If at least one job has failed, the whole build has failed.
703 # if "failed" in states:
704 # target_state = "failed"
706 # # It at least one of the jobs is still running, the whole
707 # # build is in running state.
708 # elif "running" in states:
709 # target_state = "running"
711 # # If all jobs are in the finished state, we turn into finished
713 # elif all([s == "finished" for s in states]):
714 # target_state = "finished"
716 # return target_state
718 def auto_update_state(self
):
720 Check if the state of this build can be updated and perform
721 the change if possible.
723 # Do not change the broken/obsolete state automatically.
724 if self
.state
in ("broken", "obsolete"):
727 if self
.repo
and self
.repo
.type == "stable":
728 self
.update_state("stable")
731 # If any of the build jobs are finished, the build will be put in testing
733 for job
in self
.jobs
:
734 if job
.state
== "finished":
735 self
.update_state("testing")
738 def update_state(self
, state
, user
=None, remove
=False):
739 assert state
in ("stable", "testing", "obsolete", "broken")
741 self
.db
.execute("UPDATE builds SET state = %s WHERE id = %s", state
, self
.id)
744 self
._data
["state"] = state
746 # In broken state, the removal from the repository is forced and
747 # all jobs that are not finished yet will be aborted.
748 if state
== "broken":
751 for job
in self
.jobs
:
752 if job
.state
in ("new", "pending", "running", "dependency_error"):
753 job
.state
= "aborted"
755 # If this build is in a repository, it will leave it.
756 if remove
and self
.repo
:
757 self
.repo
.rem_build(self
)
759 # If a release build is now in testing state, we put it into the
760 # first repository of the distribution.
761 elif self
.type == "release" and state
== "testing":
762 # If the build is not in a repository, yet and if there is
763 # a first repository, we put the build there.
764 if not self
.repo
and self
.distro
.first_repo
:
765 self
.distro
.first_repo
.add_build(self
, user
=user
)
769 return self
.data
.state
772 return self
.state
== "broken"
774 def obsolete_others(self
):
775 if not self
.type == "release":
778 for build
in self
.pakfire
.builds
.get_by_name(self
.pkg
.name
, type="release"):
779 # Don't modify ourself.
780 if self
.id == build
.id:
783 # Don't touch broken builds.
784 if build
.state
in ("obsolete", "broken"):
787 # Obsolete the build.
788 build
.update_state("obsolete")
def set_severity(self, severity):
	"""
	Persist a new severity value for this build.

	Bug fix: the UPDATE statement previously passed the undefined
	name "state" instead of the new severity, which raised a
	NameError whenever the setter was used.
	"""
	self.db.execute("UPDATE builds SET severity = %s WHERE id = %s",
		severity, self.id)

	# Keep the cached data row in sync with the database.
	self._data["severity"] = severity

def get_severity(self):
	"""Return the severity stored for this build."""
	return self.data.severity

severity = property(get_severity, set_severity)
803 if self
.pkg
and self
.pkg
.commit
:
804 return self
.pkg
.commit
def update_message(self, msg):
	"""Store a new message for this build and refresh the cache."""
	sql = "UPDATE builds SET message = %s WHERE id = %s"
	self.db.execute(sql, msg, self.id)

	# Mirror the change into the cached data row.
	self._data["message"] = msg
812 def has_perm(self
, user
):
814 Check, if the given user has the right to perform administrative
815 operations on this build.
823 # Check if the user is allowed to manage packages from the critical path.
824 if self
.critical_path
and not user
.has_perm("manage_critical_path"):
827 # Search for maintainers...
830 if self
.type == "scratch":
831 # The owner of a scratch build has the right to do anything with it.
832 if self
.owner_id
== user
.id:
836 elif self
.type == "release":
837 # The maintainer also is allowed to manage the build.
838 if self
.pkg
.maintainer
== user
:
841 # Deny permission for all other cases.
848 if self
.data
.message
:
849 message
= self
.data
.message
852 if self
.commit
.message
:
853 message
= "\n".join((self
.commit
.subject
, self
.commit
.message
))
855 message
= self
.commit
.subject
857 prefix
= "%s: " % self
.pkg
.name
858 if message
.startswith(prefix
):
859 message
= message
[len(prefix
):]
863 def get_priority(self
):
864 return self
.data
.priority
866 def set_priority(self
, priority
):
867 assert priority
in (-2, -1, 0, 1, 2)
869 self
.db
.execute("UPDATE builds SET priority = %s WHERE id = %s", priority
,
873 self
._data
["priority"] = priority
875 priority
= property(get_priority
, set_priority
)
880 if self
.type == "scratch":
881 path
.append(BUILD_SCRATCH_DIR
)
882 path
.append(self
.uuid
)
884 elif self
.type == "release":
885 path
.append(BUILD_RELEASE_DIR
)
886 path
.append("%s/%s-%s-%s" % \
887 (self
.pkg
.name
, self
.pkg
.epoch
, self
.pkg
.version
, self
.pkg
.release
))
890 raise Exception, "Unknown build type: %s" % self
.type
892 return os
.path
.join(*path
)
895 def source_filename(self
):
896 return os
.path
.basename(self
.pkg
.path
)
899 def download_prefix(self
):
900 return "/".join((self
.pakfire
.settings
.get("download_baseurl"), "packages"))
903 def source_download(self
):
904 return "/".join((self
.download_prefix
, self
.pkg
.path
))
907 def source_hash_sha512(self
):
908 return self
.pkg
.hash_sha512
912 # XXX maybe this should rather live in a uimodule.
913 # zlib-1.2.3-2.ip3 [src, i686, blah...]
914 s
= """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
915 (self
.state
, self
.type, self
.uuid
, self
.name
)
918 for job
in self
.jobs
:
919 s_jobs
.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
920 (job
.state
, job
.type, job
.uuid
, job
.arch
))
923 s
+= " [%s]" % ", ".join(s_jobs
)
928 def supported_arches(self
):
929 return self
.pkg
.supported_arches
932 def critical_path(self
):
933 return self
.pkg
.critical_path
def get_jobs(self, type=None):
	"""
	Returns a list of jobs of this build, optionally filtered by job type.
	"""
	jobs = self.pakfire.jobs

	return jobs.get_by_build(self.id, self, type=type)
944 Get a list of all build jobs that are in this build.
946 if self
._jobs
is None:
947 self
._jobs
= self
.get_jobs(type="build")
953 if self
._jobs
_test
is None:
954 self
._jobs
_test
= self
.get_jobs(type="test")
956 return self
._jobs
_test
959 def all_jobs_finished(self
):
962 for job
in self
.jobs
:
963 if not job
.state
== "finished":
969 def create_autojobs(self
, arches
=None, type="build"):
972 # Arches may be passed to this function. If not we use all arches
973 # this package supports.
975 arches
= self
.supported_arches
977 # Create a new job for every given archirecture.
978 for arch
in self
.pakfire
.arches
.expand(arches
):
979 # Don't create jobs for src.
980 if arch
.name
== "src":
983 job
= self
.add_job(arch
, type=type)
986 # Return all newly created jobs.
989 def add_job(self
, arch
, type="build"):
990 job
= Job
.create(self
.pakfire
, self
, arch
, type=type)
992 # Add new job to cache.
994 self
._jobs
.append(job
)
1001 def update_id(self
):
1002 if not self
.type == "release":
1005 # Generate an update ID if none does exist, yet.
1006 self
.generate_update_id()
1009 "%s" % self
.distro
.name
.replace(" ", "").upper(),
1010 "%04d" % (self
.data
.update_year
or 0),
1011 "%04d" % (self
.data
.update_num
or 0),
1016 def generate_update_id(self
):
1017 if not self
.type == "release":
1020 if self
.data
.update_num
:
1023 update
= self
.db
.get("SELECT update_num AS num FROM builds \
1024 WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")
1027 update_num
= update
.num
+ 1
1031 self
.db
.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
1032 WHERE id = %s", update_num
, self
.id)
1036 def get_comments(self
, limit
=10, offset
=0):
1037 query
= "SELECT * FROM builds_comments \
1038 JOIN users ON builds_comments.user_id = users.id \
1039 WHERE build_id = %s ORDER BY time_created ASC"
1042 for comment
in self
.db
.query(query
, self
.id):
1043 comment
= logs
.CommentLogEntry(self
.pakfire
, comment
)
1044 comments
.append(comment
)
1048 def add_comment(self
, user
, text
, credit
):
1049 # Add the new comment to the database.
1050 id = self
.db
.execute("INSERT INTO \
1051 builds_comments(build_id, user_id, text, credit, time_created) \
1052 VALUES(%s, %s, %s, %s, NOW())",
1053 self
.id, user
.id, text
, credit
)
1055 # Update the credit cache.
1056 if not self
._credits
is None:
1057 self
._credits
+= credit
1059 # Send the new comment to all watchers and stuff.
1060 self
.send_comment_message(id)
1062 # Return the ID of the newly created comment.
1068 if self
._credits
is None:
1069 # Get the sum of the credits from the database.
1070 query
= self
.db
.get(
1071 "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s",
1075 self
._credits
= query
.credits
or 0
1077 return self
._credits
def get_commenters(self):
	"""
	Return a User object for every distinct user who commented on
	this build.

	Bug fixes:
	  * The local variable was named "users", shadowing the "users"
	    module, so "users.User(...)" failed at runtime.
	  * The query required "NOT users.activated = 'Y'" which is the
	    inverse of the intended filter — only active, non-deleted
	    accounts should be listed (compare get_watchers).
	"""
	query = self.db.query("SELECT DISTINCT users.id AS id FROM builds_comments \
		JOIN users ON builds_comments.user_id = users.id \
		WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \
		AND users.activated = 'Y' ORDER BY users.id", self.id)

	return [users.User(self.pakfire, u.id) for u in query]
1092 def send_comment_message(self
, comment_id
):
1093 comment
= self
.db
.get("SELECT * FROM builds_comments WHERE id = %s",
1097 assert comment
.build_id
== self
.id
1099 # Get user who wrote the comment.
1100 user
= self
.pakfire
.users
.get_by_id(comment
.user_id
)
1103 "build_name" : self
.name
,
1104 "user_name" : user
.realname
,
1107 # XXX create beautiful message
1109 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
1110 N_("%(user_name)s commented on %(build_name)s"),
1111 comment
.text
, format
)
1115 def get_log(self
, comments
=True, repo
=True, limit
=None):
1119 created_entry
= logs
.CreatedLogEntry(self
.pakfire
, self
)
1120 entries
.append(created_entry
)
1123 entries
+= self
.get_comments(limit
=limit
)
1126 entries
+= self
.get_repo_moves(limit
=limit
)
1128 # Sort all entries in chronological order.
1132 entries
= entries
[:limit
]
def get_watchers(self):
	"""Return User objects for all active, non-deleted watchers of this build."""
	query = self.db.query("SELECT DISTINCT users.id AS id FROM builds_watchers \
		JOIN users ON builds_watchers.user_id = users.id \
		WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \
		AND users.activated = 'Y' ORDER BY users.id", self.id)

	watchers = []
	for row in query:
		watchers.append(users.User(self.pakfire, row.id))

	return watchers
def add_watcher(self, user):
	"""Subscribe a user to this build unless they already watch it."""
	# Don't add a user twice.
	if user in self.get_watchers():
		return

	self.db.execute("INSERT INTO builds_watchers(build_id, user_id) \
		VALUES(%s, %s)", self.id, user.id)
1155 def message_recipients(self
):
1158 for watcher
in self
.get_watchers():
1159 ret
.append("%s <%s>" % (watcher
.realname
, watcher
.email
))
1165 if self
._update
is None:
1166 update
= self
.db
.get("SELECT update_id AS id FROM updates_builds \
1167 WHERE build_id = %s", self
.id)
1170 self
._update
= updates
.Update(self
.pakfire
, update
.id)
1176 if self
._repo
is None:
1177 repo
= self
.db
.get("SELECT repo_id AS id FROM repositories_builds \
1178 WHERE build_id = %s", self
.id)
1181 self
._repo
= repository
.Repository(self
.pakfire
, repo
.id)
1185 def get_repo_moves(self
, limit
=None):
1186 query
= "SELECT * FROM repositories_history \
1187 WHERE build_id = %s ORDER BY time ASC"
1190 for action
in self
.db
.query(query
, self
.id):
1191 action
= logs
.RepositoryLogEntry(self
.pakfire
, action
)
1192 actions
.append(action
)
1204 def repo_time(self
):
1205 repo
= self
.db
.get("SELECT time_added FROM repositories_builds \
1206 WHERE build_id = %s", self
.id)
1209 return repo
.time_added
def get_auto_move(self):
	"""Return True if this build is moved between repositories automatically."""
	return self.data.auto_move == "Y"

def set_auto_move(self, state):
	"""
	Enable or disable automatic repository moves for this build.

	Bug fix: the UPDATE statement has two placeholders but was only
	given self.id, so the new flag value was never passed to the
	query; it is now supplied as the first argument.
	"""
	# The column stores the flag as "Y"/"N" (see get_auto_move).
	if state:
		state = "Y"
	else:
		state = "N"

	self.db.execute("UPDATE builds SET auto_move = %s WHERE id = %s",
		state, self.id)

	# Keep the cached data row in sync.
	self._data["auto_move"] = state

auto_move = property(get_auto_move, set_auto_move)
1227 def can_move_forward(self
):
1231 # If there is no next repository, we cannot move anything.
1232 next_repo
= self
.repo
.next()
1237 # If the needed amount of score is reached, we can move forward.
1238 if self
.score
>= next_repo
.score_needed
:
1241 # If the repository does not require a minimal time,
1242 # we can move forward immediately.
1243 if not self
.repo
.time_min
:
1246 query
= self
.db
.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
1247 WHERE build_id = %s", self
.id)
1248 duration
= query
.duration
1250 if duration
>= self
.repo
.time_min
:
def get_bug_ids(self):
	"""Return the IDs of all bugs linked to this build."""
	rows = self.db.query("SELECT bug_id FROM builds_bugs \
		WHERE build_id = %s", self.id)

	return [row.bug_id for row in rows]
def add_bug(self, bug_id, user=None, log=True):
	"""Link a bug to this build and optionally record the action in the log."""
	# Check if this bug is already in the list of bugs.
	if bug_id in self.get_bug_ids():
		return

	self.db.execute("INSERT INTO builds_bugs(build_id, bug_id) \
		VALUES(%s, %s)", self.id, bug_id)

	# Log the event.
	if log:
		self.log("bug_added", user=user, bug_id=bug_id)
def rem_bug(self, bug_id, user=None, log=True):
	"""Unlink a bug from this build and optionally record the action in the log."""
	self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \
		bug_id = %s", self.id, bug_id)

	# Log the event.
	if log:
		self.log("bug_removed", user=user, bug_id=bug_id)
1283 def search_for_bugs(self
):
1287 pattern
= re
.compile(r
"(bug\s?|#)(\d+)")
1289 for txt
in (self
.commit
.subject
, self
.commit
.message
):
1290 for bug
in re
.finditer(pattern
, txt
):
1292 bugid
= int(bug
.group(2))
1296 # Check if a bug with the given ID exists in BZ.
1297 bug
= self
.pakfire
.bugzilla
.get_bug(bugid
)
1305 for bug_id
in self
.get_bug_ids():
1306 bug
= self
.pakfire
.bugzilla
.get_bug(bug_id
)
1314 def _update_bugs_helper(self
, repo
):
1316 This function takes a new status and generates messages that
1317 are appended to all bugs.
1320 kwargs
= BUG_MESSAGES
[repo
.type].copy()
1324 baseurl
= self
.pakfire
.settings
.get("baseurl", "")
1326 "build_url" : "%s/build/%s" % (baseurl
, self
.uuid
),
1327 "distro_name" : self
.distro
.name
,
1328 "package_name" : self
.name
,
1329 "repo_name" : repo
.name
,
1331 kwargs
["comment"] = kwargs
["comment"] % args
1333 self
.update_bugs(**kwargs
)
def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
	"""Record a pending status/resolution/comment update for a single bug."""
	sql = "INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
		VALUES(%s, %s, %s, %s, NOW())"

	self.db.execute(sql, bug_id, status, resolution, comment)
def update_bugs(self, status, resolution=None, comment=None):
	"""Apply the same status/resolution/comment update to every linked bug."""
	linked_bugs = self.get_bug_ids()

	for bug_id in linked_bugs:
		self._update_bug(bug_id,
			status=status, resolution=resolution, comment=comment)
1345 class Jobs(base
.Object
):
def get_by_id(self, id, data=None):
	"""
	Return the Job object for the given database ID.

	data may carry an already-fetched database row.
	"""
	job = Job(self.pakfire, id, data)

	return job
1349 def get_by_uuid(self
, uuid
):
1350 job
= self
.db
.get("SELECT id FROM jobs WHERE uuid = %s", uuid
)
1353 return self
.get_by_id(job
.id)
1355 def get_by_build(self
, build_id
, build
=None, type=None):
1357 Get all jobs in the specifies build.
1359 query
= "SELECT * FROM jobs WHERE build_id = %s"
1363 query
+= " AND type = %s"
1366 # Get IDs of all builds in this group.
1368 for job
in self
.db
.query(query
, *args
):
1369 job
= Job(self
.pakfire
, job
.id, job
)
1371 # If the Build object was set, we set it so it won't be retrieved
1372 # from the database again.
1378 # Return sorted list of jobs.
1381 def get_active(self
, host_id
=None, builder
=None, states
=None):
1383 host_id
= builder
.id
1386 states
= ["dispatching", "running", "uploading"]
1388 query
= "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states
))
1392 query
+= " AND builder_id = %s" % host_id
1394 query
+= " ORDER BY \
1396 WHEN jobs.state = 'running' THEN 0 \
1397 WHEN jobs.state = 'uploading' THEN 1 \
1398 WHEN jobs.state = 'dispatching' THEN 2 \
1399 WHEN jobs.state = 'pending' THEN 3 \
1400 WHEN jobs.state = 'new' THEN 4 \
1401 END, time_started ASC"
1403 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
def get_next_iter(self, *args, **kwargs):
	"""Like get_next(), but returns an iterator over the jobs."""
	jobs = self.get_next(*args, **kwargs)

	return iter(jobs)
1408 def get_next(self
, arches
=None, builder
=None, limit
=None, offset
=None, type=None,
1409 state
=None, states
=None, max_tries
=None):
1411 if state
and states
is None:
1414 query
= "SELECT * FROM jobs \
1415 INNER JOIN jobs_queue ON jobs.id = jobs_queue.id"
1419 query
+= " AND jobs_queue.arch IN (%s)" % ", ".join(["%s"] * len(arches
))
1423 query
+= " AND jobs_queue.designated_builder_id = %s"
1424 args
.append(builder
.id)
1427 query
+= " AND jobs.max_tries <= %s"
1428 args
.append(max_tries
)
1431 query
+= " AND jobs.state IN (%s)" % ", ".join(["%s"] * len(states
))
1435 query
+= " AND jobs.type = %s"
1439 query
+= " LIMIT %s"
1443 for row
in self
.db
.query(query
, *args
):
1444 job
= self
.pakfire
.jobs
.get_by_id(row
.id, row
)
1447 # Reverse the order of the builds.
1452 def get_latest(self
, arch
=None, builder
=None, limit
=None, age
=None, date
=None):
1453 query
= "SELECT * FROM jobs"
1456 where
= ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]
1459 where
.append("arch = %s")
1463 where
.append("builder_id = %s")
1464 args
.append(builder
.id)
1468 year
, month
, day
= date
.split("-", 2)
1469 date
= datetime
.date(int(year
), int(month
), int(day
))
1473 where
.append("(DATE(time_created) = %s OR \
1474 DATE(time_started) = %s OR DATE(time_finished) = %s)")
1475 args
+= (date
, date
, date
)
1478 where
.append("time_finished >= NOW() - '%s'::interval" % age
)
1481 query
+= " WHERE %s" % " AND ".join(where
)
1483 query
+= " ORDER BY time_finished DESC"
1486 query
+= " LIMIT %s"
1489 return [Job(self
.pakfire
, j
.id, j
) for j
in self
.db
.query(query
, *args
)]
def get_average_build_time(self):
	"""
	Returns the average build time of all finished builds from the
	last three months.
	"""
	sql = "SELECT AVG(time_finished - time_started) as average \
		FROM jobs WHERE type = 'build' AND state = 'finished' AND \
		time_finished >= NOW() - '3 months'::interval"
	result = self.db.get(sql)

	return result.average
1503 def count(self
, *states
):
1504 query
= "SELECT COUNT(*) AS count FROM jobs"
1508 query
+= " WHERE state IN %s"
1511 jobs
= self
.db
.get(query
, *args
)
1515 def get_queue_length(self
, state
=None):
1517 res
= self
.db
.get("SELECT COUNT(*) AS count FROM jobs_queue \
1518 LEFT JOIN jobs ON jobs_queue.id = jobs.id WHERE state = %s", state
)
1520 res
= self
.db
.get("SELECT COUNT(*) AS count FROM jobs_queue")
1527 def get_avg_wait_time(self
):
1528 res
= self
.db
.get("SELECT AVG(time_waiting) AS time_waiting FROM jobs_waiting")
1530 if res
and res
.time_waiting
:
1532 return int(res
.time_waiting
)
1538 def get_state_stats(self
):
1539 res
= self
.db
.query("SELECT state, COUNT(*) AS count FROM jobs GROUP BY state")
1553 "temporary_failed" : 0,
1554 "dependency_error" : 0,
1555 "download_error" : 0,
1559 ret
[row
.state
] = int(row
.count
)
1563 def get_build_durations(self
):
1564 res
= self
.db
.query("SELECT platform, MIN(duration) AS minimum, \
1565 MAX(duration) AS maximum, AVG(duration) AS average, \
1566 STDDEV_POP(duration) AS stddev \
1567 FROM builds_times GROUP BY platform \
1568 UNION SELECT 'all', MIN(duration) AS minimum, \
1569 MAX(duration) AS maximum, AVG(duration) AS average, \
1570 STDDEV_POP(duration) AS stddev \
1575 ret
[row
.platform
] = {
1576 "minimum" : int(row
.minimum
),
1577 "maximum" : int(row
.maximum
),
1578 "average" : int(row
.average
),
1579 "stddev" : int(row
.stddev
),
class Job(base.Object):
    def __init__(self, pakfire, id, data=None):
        base.Object.__init__(self, pakfire)

        # The ID of this Job object.
        self.id = id

        # Cache the data of this object.
        self._data = data

        # Lazily-resolved associated objects.
        # NOTE(review): self._build is read by the build property and set
        # by create(); its initialisation line was lost in the source
        # mangling -- verify against VCS.
        self._build = None
        self._builder = None
        self._packages = None
        self._logfiles = None
1600 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.name
)
1602 def __cmp__(self
, other
):
1603 if self
.type == "build" and other
.type == "test":
1605 elif self
.type == "test" and other
.type == "build":
1608 if self
.build_id
== other
.build_id
:
1609 return cmp(self
.arch
, other
.arch
)
1611 ret
= cmp(self
.pkg
, other
.pkg
)
1614 ret
= cmp(self
.time_created
, other
.time_created
)
1620 assert self
.build
.distro
1621 return self
.build
.distro
1624 def create(cls
, pakfire
, build
, arch
, type="build"):
1625 id = pakfire
.db
.execute("INSERT INTO jobs(uuid, type, build_id, arch, time_created) \
1626 VALUES(%s, %s, %s, %s, NOW())", "%s" % uuid
.uuid4(), type, build
.id, arch
)
1628 job
= Job(pakfire
, id)
1631 # Set cache for Build object.
1634 # Jobs are by default in state "new" and wait for being checked
1635 # for dependencies. Packages that do have no build dependencies
1636 # can directly be forwarded to "pending" state.
1637 if not job
.pkg
.requires
:
1638 job
.state
= "pending"
1643 self
.__delete
_buildroots
()
1644 self
.__delete
_history
()
1645 self
.__delete
_packages
()
1646 self
.__delete
_logfiles
()
1648 # Delete the job itself.
1649 self
.db
.execute("DELETE FROM jobs WHERE id = %s", self
.id)
1651 def __delete_buildroots(self
):
1653 Removes all buildroots.
1655 self
.db
.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self
.id)
1657 def __delete_history(self
):
1659 Removes all references in the history to this build job.
1661 self
.db
.execute("DELETE FROM jobs_history WHERE job_id = %s", self
.id)
1663 def __delete_packages(self
):
1665 Deletes all uploaded files from the job.
1667 for pkg
in self
.packages
:
1670 self
.db
.execute("DELETE FROM jobs_packages WHERE job_id = %s", self
.id)
1672 def __delete_logfiles(self
):
1673 for logfile
in self
.logfiles
:
1674 self
.db
.execute("INSERT INTO queue_delete(path) VALUES(%s)", logfile
.path
)
1676 def reset(self
, user
=None):
1677 self
.__delete
_buildroots
()
1678 self
.__delete
_packages
()
1679 self
.__delete
_history
()
1680 self
.__delete
_logfiles
()
1683 self
.log("reset", user
=user
)
1687 if self
._data
is None:
1688 self
._data
= self
.db
.get("SELECT * FROM jobs WHERE id = %s", self
.id)
1695 def log(self
, action
, user
=None, state
=None, builder
=None, test_job
=None):
1702 builder_id
= builder
.id
1706 test_job_id
= test_job
.id
1708 self
.db
.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
1709 time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
1710 self
.id, action
, state
, user_id
, builder_id
, test_job_id
)
1712 def get_log(self
, limit
=None, offset
=None, user
=None):
1713 query
= "SELECT * FROM jobs_history"
1715 conditions
= ["job_id = %s",]
1719 conditions
.append("user_id = %s")
1720 args
.append(user
.id)
1723 query
+= " WHERE %s" % " AND ".join(conditions
)
1725 query
+= " ORDER BY time DESC"
1729 query
+= " LIMIT %s,%s"
1730 args
+= [offset
, limit
,]
1732 query
+= " LIMIT %s"
1736 for entry
in self
.db
.query(query
, *args
):
1737 entry
= logs
.JobLogEntry(self
.pakfire
, entry
)
1738 entries
.append(entry
)
1744 return self
.data
.uuid
1748 return self
.data
.type
1752 return self
.data
.build_id
1756 if self
._build
is None:
1757 self
._build
= self
.pakfire
.builds
.get_by_id(self
.build_id
)
1763 def related_jobs(self
):
1766 for job
in self
.build
.jobs
:
1776 return self
.build
.pkg
1780 return "%s-%s.%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
, self
.arch
)
1784 return sum((p
.size
for p
in self
.packages
))
1786 def is_running(self
):
1788 Returns True if job is in a running state.
1790 return self
.state
in ("pending", "dispatching", "running", "uploading")
1792 def get_state(self
):
1793 return self
.data
.state
1795 def set_state(self
, state
, user
=None, log
=True):
1796 # Nothing to do if the state remains.
1797 if not self
.state
== state
:
1798 self
.db
.execute("UPDATE jobs SET state = %s WHERE id = %s", state
, self
.id)
1801 if log
and not state
== "new":
1802 self
.log("state_change", state
=state
, user
=user
)
1806 self
._data
["state"] = state
1808 # Always clear the message when the status is changed.
1809 self
.update_message(None)
1811 # Update some more informations.
1812 if state
== "dispatching":
1814 self
.db
.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
1815 WHERE id = %s", self
.id)
1817 elif state
== "pending":
1818 self
.db
.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
1819 time_finished = NULL WHERE id = %s", self
.id)
1821 elif state
in ("aborted", "dependency_error", "finished", "failed"):
1822 # Set finish time and reset builder..
1823 self
.db
.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self
.id)
1825 # Send messages to the user.
1826 if state
== "finished":
1827 self
.send_finished_message()
1829 elif state
== "failed":
1830 # Remove all package files if a job is set to failed state.
1831 self
.__delete
_packages
()
1833 self
.send_failed_message()
1835 # Automatically update the state of the build (not on test builds).
1836 if self
.type == "build":
1837 self
.build
.auto_update_state()
1839 state
= property(get_state
, set_state
)
1843 return self
.data
.message
1845 def update_message(self
, msg
):
1846 self
.db
.execute("UPDATE jobs SET message = %s WHERE id = %s",
1850 self
._data
["message"] = msg
1853 def builder_id(self
):
1854 return self
.data
.builder_id
1856 def get_builder(self
):
1857 if not self
.builder_id
:
1860 if self
._builder
is None:
1861 self
._builder
= builders
.Builder(self
.pakfire
, self
.builder_id
)
1862 assert self
._builder
1864 return self
._builder
1866 def set_builder(self
, builder
, user
=None):
1867 self
.db
.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
1868 builder
.id, self
.id)
1872 self
._data
["builder_id"] = builder
.id
1874 self
._builder
= builder
1878 self
.log("builder_assigned", builder
=builder
, user
=user
)
1880 builder
= property(get_builder
, set_builder
)
1884 return self
.data
.arch
1888 return self
.backend
.arches
.get_by_name(self
.arch
)
1892 if not self
.time_started
:
1895 if self
.time_finished
:
1896 delta
= self
.time_finished
- self
.time_started
1898 delta
= datetime
.datetime
.utcnow() - self
.time_started
1900 return delta
.total_seconds()
1903 def time_created(self
):
1904 return self
.data
.time_created
1907 def time_started(self
):
1908 return self
.data
.time_started
1911 def time_finished(self
):
1912 return self
.data
.time_finished
1915 def expected_runtime(self
):
1917 Returns the estimated time and stddev, this job takes to finish.
1919 # Get the average build time.
1920 build_times
= self
.pakfire
.builds
.get_build_times_by_arch(self
.arch
,
1923 # If there is no statistical data, we cannot estimate anything.
1927 return build_times
.average
, build_times
.stddev
1931 expected_runtime
, stddev
= self
.expected_runtime
1933 if expected_runtime
:
1934 return expected_runtime
- int(self
.duration
), stddev
1938 return self
.data
.tries
1942 if self
._packages
is None:
1945 query
= "SELECT pkg_id AS id FROM jobs_packages \
1946 JOIN packages ON packages.id = jobs_packages.pkg_id \
1947 WHERE jobs_packages.job_id = %s ORDER BY packages.name"
1949 for pkg
in self
.db
.query(query
, self
.id):
1950 pkg
= packages
.Package(self
.pakfire
, pkg
.id)
1953 self
._packages
.append(pkg
)
1955 return self
._packages
1957 def get_pkg_by_uuid(self
, uuid
):
1958 pkg
= self
.db
.get("SELECT packages.id FROM packages \
1959 JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
1960 WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
1966 pkg
= packages
.Package(self
.pakfire
, pkg
.id)
1973 if self
._logfiles
is None:
1976 for log
in self
.db
.query("SELECT id FROM logfiles WHERE job_id = %s", self
.id):
1977 log
= logs
.LogFile(self
.pakfire
, log
.id)
1980 self
._logfiles
.append(log
)
1982 return self
._logfiles
1984 def add_file(self
, filename
):
1986 Add the specified file to this job.
1988 The file is copied to the right directory by this function.
1990 assert os
.path
.exists(filename
)
1992 if filename
.endswith(".log"):
1993 self
._add
_file
_log
(filename
)
1995 elif filename
.endswith(".%s" % PACKAGE_EXTENSION
):
1996 # It is not allowed to upload packages on test builds.
1997 if self
.type == "test":
2000 self
._add
_file
_package
(filename
)
2002 def _add_file_log(self
, filename
):
2004 Attach a log file to this job.
2006 target_dirname
= os
.path
.join(self
.build
.path
, "logs")
2008 if self
.type == "test":
2011 target_filename
= os
.path
.join(target_dirname
,
2012 "test.%s.%s.%s.log" % (self
.arch
, i
, self
.tries
))
2014 if os
.path
.exists(target_filename
):
2019 target_filename
= os
.path
.join(target_dirname
,
2020 "build.%s.%s.log" % (self
.arch
, self
.tries
))
2022 # Make sure the target directory exists.
2023 if not os
.path
.exists(target_dirname
):
2024 os
.makedirs(target_dirname
)
2026 # Calculate a SHA512 hash from that file.
2027 f
= open(filename
, "rb")
2028 h
= hashlib
.sha512()
2030 buf
= f
.read(BUFFER_SIZE
)
2037 # Copy the file to the final location.
2038 shutil
.copy2(filename
, target_filename
)
2040 # Create an entry in the database.
2041 self
.db
.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
2042 VALUES(%s, %s, %s, %s)", self
.id, os
.path
.relpath(target_filename
, PACKAGES_DIR
),
2043 os
.path
.getsize(target_filename
), h
.hexdigest())
2045 def _add_file_package(self
, filename
):
2046 # Open package (creates entry in the database).
2047 pkg
= packages
.Package
.open(self
.pakfire
, filename
)
2049 # Move package to the build directory.
2050 pkg
.move(os
.path
.join(self
.build
.path
, self
.arch
))
2052 # Attach the package to this job.
2053 self
.db
.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
2056 def get_aborted_state(self
):
2057 return self
.data
.aborted_state
2059 def set_aborted_state(self
, state
):
2060 self
.db
.execute("UPDATE jobs SET aborted_state = %s WHERE id = %s",
2064 self
._data
["aborted_state"] = state
2066 aborted_state
= property(get_aborted_state
, set_aborted_state
)
2069 def message_recipients(self
):
2072 # Add all people watching the build.
2073 l
+= self
.build
.message_recipients
2075 # Add the package maintainer on release builds.
2076 if self
.build
.type == "release":
2077 maint
= self
.pkg
.maintainer
2079 if isinstance(maint
, users
.User
):
2080 l
.append("%s <%s>" % (maint
.realname
, maint
.email
))
2084 # XXX add committer and commit author.
2086 # Add the owner of the scratch build on scratch builds.
2087 elif self
.build
.type == "scratch" and self
.build
.user
:
2088 l
.append("%s <%s>" % \
2089 (self
.build
.user
.realname
, self
.build
.user
.email
))
2093 def save_buildroot(self
, pkgs
):
2096 for pkg_name
, pkg_uuid
in pkgs
:
2097 rows
.append((self
.id, self
.tries
, pkg_uuid
, pkg_name
))
2099 # Cleanup old stuff first (for rebuilding packages).
2100 self
.db
.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s",
2101 self
.id, self
.tries
)
2103 self
.db
.executemany("INSERT INTO \
2104 jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \
2105 VALUES(%s, %s, %s, %s)", rows
)
2107 def has_buildroot(self
, tries
=None):
2111 res
= self
.db
.get("SELECT COUNT(*) AS num FROM jobs_buildroots \
2112 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
2113 ORDER BY pkg_name", self
.id, tries
)
2120 def get_buildroot(self
, tries
=None):
2124 rows
= self
.db
.query("SELECT * FROM jobs_buildroots \
2125 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
2126 ORDER BY pkg_name", self
.id, tries
)
2130 # Search for this package in the packages table.
2131 pkg
= self
.pakfire
.packages
.get_by_uuid(row
.pkg_uuid
)
2132 pkgs
.append((row
.pkg_name
, row
.pkg_uuid
, pkg
))
2136 def send_finished_message(self
):
2137 # Send no finished mails for test jobs.
2138 if self
.type == "test":
2141 logging
.debug("Sending finished message for job %s to %s" % \
2142 (self
.name
, ", ".join(self
.message_recipients
)))
2145 "build_name" : self
.name
,
2146 "build_host" : self
.builder
.name
,
2147 "build_uuid" : self
.uuid
,
2150 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
2151 MSG_BUILD_FINISHED_SUBJECT
, MSG_BUILD_FINISHED
, info
)
2153 def send_failed_message(self
):
2154 logging
.debug("Sending failed message for job %s to %s" % \
2155 (self
.name
, ", ".join(self
.message_recipients
)))
2159 build_host
= self
.builder
.name
2162 "build_name" : self
.name
,
2163 "build_host" : build_host
,
2164 "build_uuid" : self
.uuid
,
2167 self
.pakfire
.messages
.send_to_all(self
.message_recipients
,
2168 MSG_BUILD_FAILED_SUBJECT
, MSG_BUILD_FAILED
, info
)
2170 def set_start_time(self
, start_time
):
2171 if start_time
is None:
2174 self
.db
.execute("UPDATE jobs SET start_not_before = NOW() + %s \
2175 WHERE id = %s LIMIT 1", start_time
, self
.id)
2177 def schedule(self
, type, start_time
=None, user
=None):
2178 assert type in ("rebuild", "test")
2180 if type == "rebuild":
2181 if self
.state
== "finished":
2184 self
.set_state("new", user
=user
, log
=False)
2185 self
.set_start_time(start_time
)
2188 self
.log("schedule_rebuild", user
=user
)
2190 elif type == "test":
2191 if not self
.state
== "finished":
2194 # Create a new job with same build and arch.
2195 job
= self
.create(self
.pakfire
, self
.build
, self
.arch
, type="test")
2196 job
.set_start_time(start_time
)
2199 self
.log("schedule_test_job", test_job
=job
, user
=user
)
2203 def schedule_test(self
, start_not_before
=None, user
=None):
2205 return self
.schedule("test", start_time
=start_not_before
, user
=user
)
2207 def schedule_rebuild(self
, start_not_before
=None, user
=None):
2209 return self
.schedule("rebuild", start_time
=start_not_before
, user
=user
)
2211 def get_build_repos(self
):
2213 Returns a list of all repositories that should be used when
2216 repo_ids
= self
.db
.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",
2220 return self
.distro
.get_build_repos()
2223 for repo
in self
.distro
.repositories
:
2224 if repo
.id in [r
.id for r
in repo_ids
]:
2227 return repos
or self
.distro
.get_build_repos()
2229 def get_repo_config(self
):
2231 Get repository configuration file that is sent to the builder.
2235 for repo
in self
.get_build_repos():
2236 confs
.append(repo
.get_conf())
2238 return "\n\n".join(confs
)
2240 def get_config(self
):
2242 Get configuration file that is sent to the builder.
2246 # Add the distribution configuration.
2247 confs
.append(self
.distro
.get_config())
2249 # Then add all repositories for this build.
2250 confs
.append(self
.get_repo_config())
2252 return "\n\n".join(confs
)
2255 if not self
.packages
:
2261 for pkg
in self
.packages
:
2262 conditions
.append(" pkg_uuid = %s")
2263 args
.append(pkg
.uuid
)
2265 query
= "SELECT DISTINCT job_id AS id FROM jobs_buildroots"
2266 query
+= " WHERE %s" % " OR ".join(conditions
)
2268 job_ids
= self
.db
.query(query
, *args
)
2272 def resolvdep(self
):
2273 config
= pakfire
.config
.Config(files
=["general.conf"])
2274 config
.parse(self
.get_config())
2276 # The filename of the source file.
2277 filename
= os
.path
.join(PACKAGES_DIR
, self
.build
.pkg
.path
)
2278 assert os
.path
.exists(filename
), filename
2280 # Create a new pakfire instance with the configuration for
2282 p
= pakfire
.PakfireServer(config
=config
, arch
=self
.arch
)
2284 # Try to solve the build dependencies.
2286 solver
= p
.resolvdep(filename
)
2288 # Catch dependency errors and log the problem string.
2289 except DependencyError
, e
:
2290 self
.state
= "dependency_error"
2291 self
.update_message(e
)
2294 # If the build dependencies can be resolved, we set the build in
2296 if solver
.status
is True:
2297 if self
.state
in ("failed",):
2300 self
.state
= "pending"