1 #!/usr/bin/python
2
3 import datetime
4 import hashlib
5 import logging
6 import os
7 import re
8 import shutil
9 import uuid
10
11 import pakfire
12 import pakfire.config
13 import pakfire.packages
14
15 from . import base
16 from . import builders
17 from . import logs
18 from . import packages
19 from . import repository
20 from . import updates
21 from . import users
22
23 from .constants import *
24 from .decorators import *
25
26 def import_from_package(_pakfire, filename, distro=None, commit=None, type="release",
27 arches=None, check_for_duplicates=True, owner=None):
28
29 if distro is None:
30 distro = commit.source.distro
31
32 assert distro
33
34 # Open the package file to read some basic information.
35 pkg = pakfire.packages.open(None, None, filename)
36
37 if check_for_duplicates:
38 if distro.has_package(pkg.name, pkg.epoch, pkg.version, pkg.release):
39 logging.warning("Duplicate package detected: %s. Skipping." % pkg)
40 return
41
42 # Open the package and add it to the database.
43 pkg = packages.Package.open(_pakfire, filename)
44 logging.debug("Created new package: %s" % pkg)
45
46 # Associate the package to the processed commit.
47 if commit:
48 pkg.commit = commit
49
50 # Create a new build object from the package which
51 # is always a release build.
52 build = Build.create(_pakfire, pkg, type=type, owner=owner, distro=distro)
53 logging.debug("Created new build job: %s" % build)
54
55 # Create all automatic jobs.
56 build.create_autojobs(arches=arches)
57
58 return pkg, build
59
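# Example (sketch): importing a freshly uploaded source package and creating
# its build jobs. "backend" and "commit" are assumed to come from the calling
# context; the filename is illustrative only:
#
#   pkg, build = import_from_package(backend, "/tmp/foo-1.0-1.src.pfm",
#       commit=commit, type="release", arches=["x86_64", "i686"])
#
# Build.create() moves the source file below the build directory and
# create_autojobs() schedules one build job per requested architecture.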
60
61 class Builds(base.Object):
62 def get_by_id(self, id, data=None):
63 return Build(self.pakfire, id, data=data)
64
65 def get_by_uuid(self, uuid):
66 build = self.db.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid)
67
68 if build:
69 return self.get_by_id(build.id)
70
71 def get_all(self, limit=50):
72 query = "SELECT * FROM builds ORDER BY time_created DESC"
73
74 if limit:
75 query += " LIMIT %d" % limit
76
77 return [self.get_by_id(b.id, b) for b in self.db.query(query)]
78
79 def get_by_user(self, user, type=None, public=None):
80 args = []
81 conditions = []
82
83 if not type or type == "scratch":
84 # On scratch builds the user id equals the owner id.
85 conditions.append("(builds.type = 'scratch' AND owner_id = %s)")
86 args.append(user.id)
87
88 elif not type or type == "release":
89 pass # TODO
90
91 if public is True:
92 conditions.append("public = 'Y'")
93 elif public is False:
94 conditions.append("public = 'N'")
95
96 query = "SELECT builds.* FROM builds \
97 JOIN packages ON builds.pkg_id = packages.id"
98
99 if conditions:
100 query += " WHERE %s" % " AND ".join(conditions)
101
102 query += " ORDER BY builds.time_created DESC"
103
104 builds = []
105 for build in self.db.query(query, *args):
106 build = Build(self.pakfire, build.id, build)
107 builds.append(build)
108
109 return builds
110
111 def get_by_name(self, name, type=None, public=None, user=None, limit=None, offset=None):
112 args = [name,]
113 conditions = [
114 "packages.name = %s",
115 ]
116
117 if type:
118 conditions.append("builds.type = %s")
119 args.append(type)
120
121 or_conditions = []
122 if public is True:
123 or_conditions.append("public = 'Y'")
124 elif public is False:
125 or_conditions.append("public = 'N'")
126
127 if user and not user.is_admin():
128 or_conditions.append("builds.owner_id = %s")
129 args.append(user.id)
130
131 query = "SELECT builds.* FROM builds \
132 JOIN packages ON builds.pkg_id = packages.id"
133
134 if or_conditions:
135 conditions.append(" OR ".join(or_conditions))
136
137 if conditions:
138 query += " WHERE %s" % " AND ".join(conditions)
139
140 if type == "release":
141 query += " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"
142 elif type == "scratch":
143 query += " ORDER BY time_created DESC"
144
145 if limit:
146 if offset:
147 query += " LIMIT %s,%s"
148 args.extend([offset, limit])
149 else:
150 query += " LIMIT %s"
151 args.append(limit)
152
153 return [Build(self.pakfire, b.id, b) for b in self.db.query(query, *args)]
154
155 def get_latest_by_name(self, name, type=None, public=None):
156 query = "\
157 SELECT * FROM builds \
158 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
159 WHERE builds_latest.package_name = %s"
160 args = [name,]
161
162 if type:
163 query += " AND builds_latest.build_type = %s"
164 args.append(type)
165
166 if public is True:
167 query += " AND builds.public = %s"
168 args.append("Y")
169 elif public is False:
170 query += " AND builds.public = %s"
171 args.append("N")
172
173 # Get the last one only.
174 # Prefer release builds over scratch builds.
175 query += "\
176 ORDER BY \
177 CASE builds.type WHEN 'release' THEN 0 ELSE 1 END, \
178 builds.time_created DESC \
179 LIMIT 1"
180
181 res = self.db.get(query, *args)
182
183 if res:
184 return Build(self.pakfire, res.id, res)
185
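# Example: for a package that has both a scratch build and a newer release
# build, get_latest_by_name("foo") returns the release build, because the
# CASE expression above sorts release builds before scratch builds regardless
# of their creation time; pass type="scratch" to consider scratch builds only.
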
186 def get_active_builds(self, name, public=None):
187 query = "\
188 SELECT * FROM builds \
189 LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
190 WHERE builds_latest.package_name = %s AND builds.type = %s"
191 args = [name, "release"]
192
193 if public is True:
194 query += " AND builds.public = %s"
195 args.append("Y")
196 elif public is False:
197 query += " AND builds.public = %s"
198 args.append("N")
199
200 builds = []
201 for row in self.db.query(query, *args):
202 b = Build(self.pakfire, row.id, row)
203 builds.append(b)
204
205 # Sort the result. Latest build first.
206 builds.sort(reverse=True)
207
208 return builds
209
210 def count(self):
211 builds = self.db.get("SELECT COUNT(*) AS count FROM builds")
212 if builds:
213 return builds.count
214
215 def needs_test(self, threshold, arch, limit=None, randomize=False):
216 query = "SELECT id FROM builds \
217 WHERE NOT EXISTS \
218 (SELECT * FROM jobs WHERE \
219 jobs.build_id = builds.id AND \
220 jobs.arch = %s AND \
221 (jobs.state != 'finished' OR \
222 jobs.time_finished >= %s) \
223 ) \
224 AND EXISTS \
225 (SELECT * FROM jobs WHERE \
226 jobs.build_id = builds.id AND \
227 jobs.arch = %s AND \
228 jobs.type = 'build' AND \
229 jobs.state = 'finished' AND \
230 jobs.time_finished < %s \
231 ) \
232 AND builds.type = 'release' \
233 AND (builds.state = 'stable' OR builds.state = 'testing')"
234 args = [arch, threshold, arch, threshold]
235
236 if randomize:
237 query += " ORDER BY RAND()"
238
239 if limit:
240 query += " LIMIT %s"
241 args.append(limit)
242
243 return [Build(self.pakfire, b.id) for b in self.db.query(query, *args)]
244
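# Example (sketch): selecting release builds whose "x86_64" build job finished
# before a cut-off date, so that a fresh test job can be scheduled for them.
# "backend" is assumed to be an initialized application instance:
#
#   threshold = datetime.datetime.utcnow() - datetime.timedelta(days=14)
#   for build in backend.builds.needs_test(threshold, "x86_64", limit=10):
#       for job in build.jobs:
#           if job.arch == "x86_64" and job.state == "finished":
#               job.schedule("test")
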
245 def get_obsolete(self, repo=None):
246 """
247 Get all obsoleted builds.
248
249 If repo is True: which are in any repository.
250 If repo is some Repository object: which are in this repository.
251 """
252 args = []
253
254 if repo is None:
255 query = "SELECT id FROM builds WHERE state = 'obsolete'"
256
257 else:
258 query = "SELECT build_id AS id FROM repositories_builds \
259 JOIN builds ON builds.id = repositories_builds.build_id \
260 WHERE builds.state = 'obsolete'"
261
262 if repo and repo is not True:
263 query += " AND repositories_builds.repo_id = %s"
264 args.append(repo.id)
265
266 res = self.db.query(query, *args)
267
268 builds = []
269 for build in res:
270 build = Build(self.pakfire, build.id)
271 builds.append(build)
272
273 return builds
274
275 def get_changelog(self, name, public=None, limit=5, offset=0):
276 query = "SELECT builds.* FROM builds \
277 JOIN packages ON builds.pkg_id = packages.id \
278 WHERE \
279 builds.type = %s \
280 AND \
281 packages.name = %s"
282 args = ["release", name,]
283
284 if public == True:
285 query += " AND builds.public = %s"
286 args.append("Y")
287 elif public == False:
288 query += " AND builds.public = %s"
289 args.append("N")
290
291 query += " ORDER BY builds.time_created DESC"
292
293 if limit:
294 if offset:
295 query += " LIMIT %s,%s"
296 args += [offset, limit]
297 else:
298 query += " LIMIT %s"
299 args.append(limit)
300
301 builds = []
302 for b in self.db.query(query, *args):
303 b = Build(self.pakfire, b.id, b)
304 builds.append(b)
305
306 builds.sort(reverse=True)
307
308 return builds
309
310 def get_comments(self, limit=10, offset=None, user=None):
311 query = "SELECT * FROM builds_comments \
312 JOIN users ON builds_comments.user_id = users.id"
313 args = []
314
315 wheres = []
316 if user:
317 wheres.append("users.id = %s")
318 args.append(user.id)
319
320 if wheres:
321 query += " WHERE %s" % " AND ".join(wheres)
322
323 # Sort everything.
324 query += " ORDER BY time_created DESC"
325
326 # Limits.
327 if limit:
328 if offset:
329 query += " LIMIT %s,%s"
330 args.append(offset)
331 else:
332 query += " LIMIT %s"
333
334 args.append(limit)
335
336 comments = []
337 for comment in self.db.query(query, *args):
338 comment = logs.CommentLogEntry(self.pakfire, comment)
339 comments.append(comment)
340
341 return comments
342
343 def get_build_times_summary(self, name=None, job_type=None, arch=None):
344 query = "\
345 SELECT \
346 builds_times.arch AS arch, \
347 MAX(duration) AS maximum, \
348 MIN(duration) AS minimum, \
349 AVG(duration) AS average, \
350 SUM(duration) AS sum, \
351 STDDEV_POP(duration) AS stddev \
352 FROM builds_times \
353 LEFT JOIN builds ON builds_times.build_id = builds.id \
354 LEFT JOIN packages ON builds.pkg_id = packages.id"
355
356 args = []
357 conditions = []
358
359 # Filter for name.
360 if name:
361 conditions.append("packages.name = %s")
362 args.append(name)
363
364 # Filter by job types.
365 if job_type:
366 conditions.append("builds_times.job_type = %s")
367 args.append(job_type)
368
369 # Filter by arch.
370 if arch:
371 conditions.append("builds_times.arch = %s")
372 args.append(arch)
373
374 # Add conditions.
375 if conditions:
376 query += " WHERE %s" % " AND ".join(conditions)
377
378 # Grouping and sorting.
379 query += " GROUP BY arch ORDER BY arch DESC"
380
381 return self.db.query(query, *args)
382
383 def get_build_times_by_arch(self, arch, **kwargs):
384 kwargs.update({
385 "arch" : arch,
386 })
387
388 build_times = self.get_build_times_summary(**kwargs)
389 if build_times:
390 return build_times[0]
391
392 def get_types_stats(self):
393 res = self.db.query("SELECT type, COUNT(*) AS count FROM builds GROUP BY type")
394
395 if not res:
396 return {}
397
398 ret = {}
399 for row in res:
400 ret[row.type] = row.count
401
402 return ret
403
404
405 class Build(base.Object):
406 def __init__(self, pakfire, id, data=None):
407 base.Object.__init__(self, pakfire)
408
409 # ID of this build
410 self.id = id
411
412 # Cache data.
413 self._data = data
414 self._jobs = None
415 self._jobs_test = None
416 self._depends_on = None
417 self._pkg = None
418 self._credits = None
419 self._owner = None
420 self._update = None
421 self._repo = None
422 self._distro = None
423
424 def __repr__(self):
425 return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.pkg)
426
427 def __cmp__(self, other):
428 assert self.pkg
429 assert other.pkg
430
431 return cmp(self.pkg, other.pkg)
432
433 @classmethod
434 def create(cls, pakfire, pkg, type="release", owner=None, distro=None, public=True):
435 assert type in ("release", "scratch", "test")
436 assert distro, "You need to specify the distribution of this build."
437
438 if public:
439 public = "Y"
440 else:
441 public = "N"
442
443 # Check if scratch build has an owner.
444 if type == "scratch" and not owner:
445 raise Exception("Scratch builds require an owner")
446
447 # Set the default priority of this build.
448 if type == "release":
449 priority = 0
450
451 elif type == "scratch":
452 priority = 1
453
454 elif type == "test":
455 priority = -1
456
457 id = pakfire.db.execute("""
458 INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
459 VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid.uuid4(), pkg.id,
460 type, distro.id, public, priority)
461
462 # Set the owner of this buildgroup.
463 if owner:
464 pakfire.db.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
465 owner.id, id)
466
467 build = cls(pakfire, id)
468
469 # Log that the build has been created.
470 build.log("created", user=owner)
471
472 # Create directory where the files live.
473 if not os.path.exists(build.path):
474 os.makedirs(build.path)
475
476 # Move package file to the directory of the build.
477 source_path = os.path.join(build.path, "src")
478 build.pkg.move(source_path)
479
480 # Generate an update id.
481 build.generate_update_id()
482
483 # Obsolete all other builds with the same name to track updates.
484 build.obsolete_others()
485
486 # Search for possible bug IDs in the commit message.
487 build.search_for_bugs()
488
489 return build
490
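# Example (sketch): creating a scratch build by hand. "backend", "pkg" and
# "user" are assumed to exist in the calling context; scratch builds must
# have an owner, otherwise create() raises an exception:
#
#   distro = backend.distros.get_by_id(1)   # any Distribution object
#   build = Build.create(backend, pkg, type="scratch", owner=user, distro=distro)
#   build.create_autojobs()
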
491 def delete(self):
492 """
493 Deletes this build including all jobs, packages and the source
494 package.
495 """
496 # If the build is in a repository, we need to remove it.
497 if self.repo:
498 self.repo.rem_build(self)
499
500 for job in self.jobs + self.test_jobs:
501 job.delete()
502
503 if self.pkg:
504 self.pkg.delete()
505
506 # Delete everything related to this build.
507 self.__delete_bugs()
508 self.__delete_comments()
509 self.__delete_history()
510 self.__delete_watchers()
511
512 # Delete the build itself.
513 self.db.execute("DELETE FROM builds WHERE id = %s", self.id)
514
515 def __delete_bugs(self):
516 """
517 Delete all associated bugs.
518 """
519 self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s", self.id)
520
521 def __delete_comments(self):
522 """
523 Delete all comments.
524 """
525 self.db.execute("DELETE FROM builds_comments WHERE build_id = %s", self.id)
526
527 def __delete_history(self):
528 """
529 Delete the repository history.
530 """
531 self.db.execute("DELETE FROM repositories_history WHERE build_id = %s", self.id)
532
533 def __delete_watchers(self):
534 """
535 Delete all watchers.
536 """
537 self.db.execute("DELETE FROM builds_watchers WHERE build_id = %s", self.id)
538
539 def reset(self):
540 """
541 Resets the whole build so it can start again (as if it had never
542 been started).
543 """
544 for job in self.jobs:
545 job.reset()
546
547 #self.__delete_bugs()
548 self.__delete_comments()
549 self.__delete_history()
550 self.__delete_watchers()
551
552 self.state = "building"
553
554 # XXX empty log
555
556 @property
557 def data(self):
558 """
559 Lazy fetching of data for this object.
560 """
561 if self._data is None:
562 self._data = self.db.get("SELECT * FROM builds WHERE id = %s", self.id)
563 assert self._data
564
565 return self._data
566
567 @property
568 def info(self):
569 """
570 A set of information that is sent to the XMLRPC client.
571 """
572 return { "uuid" : self.uuid }
573
574 def log(self, action, user=None, bug_id=None):
575 user_id = None
576 if user:
577 user_id = user.id
578
579 self.db.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
580 VALUES(%s, %s, %s, NOW(), %s)", self.id, action, user_id, bug_id)
581
582 @property
583 def uuid(self):
584 """
585 The UUID of this build.
586 """
587 return self.data.uuid
588
589 @property
590 def pkg(self):
591 """
592 Get package that is to be built in the build.
593 """
594 if self._pkg is None:
595 self._pkg = packages.Package(self.pakfire, self.data.pkg_id)
596
597 return self._pkg
598
599 @property
600 def name(self):
601 return "%s-%s" % (self.pkg.name, self.pkg.friendly_version)
602
603 @property
604 def type(self):
605 """
606 The type of this build.
607 """
608 return self.data.type
609
610 @property
611 def owner_id(self):
612 """
613 The ID of the owner of this build.
614 """
615 return self.data.owner_id
616
617 @property
618 def owner(self):
619 """
620 The owner of this build.
621 """
622 if not self.owner_id:
623 return
624
625 if self._owner is None:
626 self._owner = self.pakfire.users.get_by_id(self.owner_id)
627 assert self._owner
628
629 return self._owner
630
631 @property
632 def distro_id(self):
633 return self.data.distro_id
634
635 @property
636 def distro(self):
637 if self._distro is None:
638 self._distro = self.pakfire.distros.get_by_id(self.distro_id)
639 assert self._distro
640
641 return self._distro
642
643 @property
644 def user(self):
645 if self.type == "scratch":
646 return self.owner
647
648 def get_depends_on(self):
649 if self.data.depends_on and self._depends_on is None:
650 self._depends_on = Build(self.pakfire, self.data.depends_on)
651
652 return self._depends_on
653
654 def set_depends_on(self, build):
655 self.db.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
656 build.id, self.id)
657
658 # Update cache.
659 self._depends_on = build
660 self._data["depends_on"] = build.id
661
662 depends_on = property(get_depends_on, set_depends_on)
663
664 @property
665 def created(self):
666 return self.data.time_created
667
668 @property
669 def date(self):
670 return self.created.date()
671
672 @property
673 def public(self):
674 """
675 Is this build public?
676 """
677 return self.data.public == "Y"
678
679 @property
680 def size(self):
681 """
682 Returns the size on disk of this build.
683 """
684 s = 0
685
686 # Add the source package.
687 if self.pkg:
688 s += self.pkg.size
689
690 # Add all jobs.
691 s += sum((j.size for j in self.jobs))
692
693 return s
694
695 #@property
696 #def state(self):
697 # # Cache all states.
698 # states = [j.state for j in self.jobs]
699 #
700 # target_state = "unknown"
701 #
702 # # If at least one job has failed, the whole build has failed.
703 # if "failed" in states:
704 # target_state = "failed"
705 #
706 # # If at least one of the jobs is still running, the whole
707 # # build is in running state.
708 # elif "running" in states:
709 # target_state = "running"
710 #
711 # # If all jobs are in the finished state, we turn into finished
712 # # state as well.
713 # elif all([s == "finished" for s in states]):
714 # target_state = "finished"
715 #
716 # return target_state
717
718 def auto_update_state(self):
719 """
720 Check if the state of this build can be updated and perform
721 the change if possible.
722 """
723 # Do not change the broken/obsolete state automatically.
724 if self.state in ("broken", "obsolete"):
725 return
726
727 if self.repo and self.repo.type == "stable":
728 self.update_state("stable")
729 return
730
731 # If any of the build jobs are finished, the build will be put in testing
732 # state.
733 for job in self.jobs:
734 if job.state == "finished":
735 self.update_state("testing")
736 break
737
738 def update_state(self, state, user=None, remove=False):
739 assert state in ("stable", "testing", "obsolete", "broken")
740
741 self.db.execute("UPDATE builds SET state = %s WHERE id = %s", state, self.id)
742
743 if self._data:
744 self._data["state"] = state
745
746 # In broken state, the removal from the repository is forced and
747 # all jobs that are not finished yet will be aborted.
748 if state == "broken":
749 remove = True
750
751 for job in self.jobs:
752 if job.state in ("new", "pending", "running", "dependency_error"):
753 job.state = "aborted"
754
755 # If this build is in a repository, it will leave it.
756 if remove and self.repo:
757 self.repo.rem_build(self)
758
759 # If a release build is now in testing state, we put it into the
760 # first repository of the distribution.
761 elif self.type == "release" and state == "testing":
762 # If the build is not in a repository yet and there is
763 # a first repository, we put the build there.
764 if not self.repo and self.distro.first_repo:
765 self.distro.first_repo.add_build(self, user=user)
766
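# Example (sketch) of the intended state flow for a release build: once the
# first build job reports "finished", auto_update_state() switches the build
# to "testing", and update_state() then adds it to the first repository of
# its distribution (if one is configured):
#
#   job.state = "finished"            # Job.set_state() triggers auto_update_state()
#   assert build.state == "testing"
#   assert build.repo is not None     # assuming distro.first_repo exists
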
767 @property
768 def state(self):
769 return self.data.state
770
771 def is_broken(self):
772 return self.state == "broken"
773
774 def obsolete_others(self):
775 if not self.type == "release":
776 return
777
778 for build in self.pakfire.builds.get_by_name(self.pkg.name, type="release"):
779 # Don't modify ourself.
780 if self.id == build.id:
781 continue
782
783 # Don't touch broken builds.
784 if build.state in ("obsolete", "broken"):
785 continue
786
787 # Obsolete the build.
788 build.update_state("obsolete")
789
790 def set_severity(self, severity):
791 self.db.execute("UPDATE builds SET severity = %s WHERE id = %s", severity, self.id)
792
793 if self._data:
794 self._data["severity"] = severity
795
796 def get_severity(self):
797 return self.data.severity
798
799 severity = property(get_severity, set_severity)
800
801 @property
802 def commit(self):
803 if self.pkg and self.pkg.commit:
804 return self.pkg.commit
805
806 def update_message(self, msg):
807 self.db.execute("UPDATE builds SET message = %s WHERE id = %s", msg, self.id)
808
809 if self._data:
810 self._data["message"] = msg
811
812 def has_perm(self, user):
813 """
814 Check if the given user has the right to perform administrative
815 operations on this build.
816 """
817 if user is None:
818 return False
819
820 if user.is_admin():
821 return True
822
823 # Check if the user is allowed to manage packages from the critical path.
824 if self.critical_path and not user.has_perm("manage_critical_path"):
825 return False
826
827 # Search for maintainers...
828
829 # Scratch builds.
830 if self.type == "scratch":
831 # The owner of a scratch build has the right to do anything with it.
832 if self.owner_id == user.id:
833 return True
834
835 # Release builds.
836 elif self.type == "release":
837 # The maintainer also is allowed to manage the build.
838 if self.pkg.maintainer == user:
839 return True
840
841 # Deny permission for all other cases.
842 return False
843
844 @property
845 def message(self):
846 message = ""
847
848 if self.data.message:
849 message = self.data.message
850
851 elif self.commit:
852 if self.commit.message:
853 message = "\n".join((self.commit.subject, self.commit.message))
854 else:
855 message = self.commit.subject
856
857 prefix = "%s: " % self.pkg.name
858 if message.startswith(prefix):
859 message = message[len(prefix):]
860
861 return message
862
863 def get_priority(self):
864 return self.data.priority
865
866 def set_priority(self, priority):
867 assert priority in (-2, -1, 0, 1, 2)
868
869 self.db.execute("UPDATE builds SET priority = %s WHERE id = %s", priority,
870 self.id)
871
872 if self._data:
873 self._data["priority"] = priority
874
875 priority = property(get_priority, set_priority)
876
877 @property
878 def path(self):
879 path = []
880 if self.type == "scratch":
881 path.append(BUILD_SCRATCH_DIR)
882 path.append(self.uuid)
883
884 elif self.type == "release":
885 path.append(BUILD_RELEASE_DIR)
886 path.append("%s/%s-%s-%s" % \
887 (self.pkg.name, self.pkg.epoch, self.pkg.version, self.pkg.release))
888
889 else:
890 raise Exception("Unknown build type: %s" % self.type)
891
892 return os.path.join(*path)
893
894 @property
895 def source_filename(self):
896 return os.path.basename(self.pkg.path)
897
898 @property
899 def download_prefix(self):
900 return "/".join((self.pakfire.settings.get("download_baseurl"), "packages"))
901
902 @property
903 def source_download(self):
904 return "/".join((self.download_prefix, self.pkg.path))
905
906 @property
907 def source_hash_sha512(self):
908 return self.pkg.hash_sha512
909
910 @property
911 def link(self):
912 # XXX maybe this should rather live in a uimodule.
913 # zlib-1.2.3-2.ip3 [src, i686, blah...]
914 s = """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
915 (self.state, self.type, self.uuid, self.name)
916
917 s_jobs = []
918 for job in self.jobs:
919 s_jobs.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
920 (job.state, job.type, job.uuid, job.arch))
921
922 if s_jobs:
923 s += " [%s]" % ", ".join(s_jobs)
924
925 return s
926
927 @property
928 def supported_arches(self):
929 return self.pkg.supported_arches
930
931 @property
932 def critical_path(self):
933 return self.pkg.critical_path
934
935 def get_jobs(self, type=None):
936 """
937 Returns a list of jobs of this build.
938 """
939 return self.pakfire.jobs.get_by_build(self.id, self, type=type)
940
941 @property
942 def jobs(self):
943 """
944 Get a list of all build jobs that are in this build.
945 """
946 if self._jobs is None:
947 self._jobs = self.get_jobs(type="build")
948
949 return self._jobs
950
951 @property
952 def test_jobs(self):
953 if self._jobs_test is None:
954 self._jobs_test = self.get_jobs(type="test")
955
956 return self._jobs_test
957
958 @property
959 def all_jobs_finished(self):
960 ret = True
961
962 for job in self.jobs:
963 if not job.state == "finished":
964 ret = False
965 break
966
967 return ret
968
969 def create_autojobs(self, arches=None, type="build"):
970 jobs = []
971
972 # Arches may be passed to this function. If not, we use all arches
973 # this package supports.
974 if arches is None:
975 arches = self.supported_arches
976
977 # Create a new job for every given architecture.
978 for arch in self.pakfire.arches.expand(arches):
979 # Don't create jobs for src.
980 if arch.name == "src":
981 continue
982
983 job = self.add_job(arch, type=type)
984 jobs.append(job)
985
986 # Return all newly created jobs.
987 return jobs
988
989 def add_job(self, arch, type="build"):
990 job = Job.create(self.pakfire, self, arch, type=type)
991
992 # Add new job to cache.
993 if self._jobs:
994 self._jobs.append(job)
995
996 return job
997
998 ## Update stuff
999
1000 @property
1001 def update_id(self):
1002 if not self.type == "release":
1003 return
1004
1005 # Generate an update ID if none exists yet.
1006 self.generate_update_id()
1007
1008 s = [
1009 "%s" % self.distro.name.replace(" ", "").upper(),
1010 "%04d" % (self.data.update_year or 0),
1011 "%04d" % (self.data.update_num or 0),
1012 ]
1013
1014 return "-".join(s)
1015
1016 def generate_update_id(self):
1017 if not self.type == "release":
1018 return
1019
1020 if self.data.update_num:
1021 return
1022
1023 update = self.db.get("SELECT update_num AS num FROM builds \
1024 WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")
1025
1026 if update:
1027 update_num = update.num + 1
1028 else:
1029 update_num = 1
1030
1031 self.db.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
1032 WHERE id = %s", update_num, self.id)
1033
1034 ## Comment stuff
1035
1036 def get_comments(self, limit=10, offset=0):
1037 query = "SELECT * FROM builds_comments \
1038 JOIN users ON builds_comments.user_id = users.id \
1039 WHERE build_id = %s ORDER BY time_created ASC"
1040
1041 comments = []
1042 for comment in self.db.query(query, self.id):
1043 comment = logs.CommentLogEntry(self.pakfire, comment)
1044 comments.append(comment)
1045
1046 return comments
1047
1048 def add_comment(self, user, text, credit):
1049 # Add the new comment to the database.
1050 id = self.db.execute("INSERT INTO \
1051 builds_comments(build_id, user_id, text, credit, time_created) \
1052 VALUES(%s, %s, %s, %s, NOW())",
1053 self.id, user.id, text, credit)
1054
1055 # Update the credit cache.
1056 if self._credits is not None:
1057 self._credits += credit
1058
1059 # Send the new comment to all watchers and stuff.
1060 self.send_comment_message(id)
1061
1062 # Return the ID of the newly created comment.
1063 return id
1064
1065 @property
1066 def score(self):
1067 # XXX UPDATE THIS
1068 if self._credits is None:
1069 # Get the sum of the credits from the database.
1070 query = self.db.get(
1071 "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s",
1072 self.id
1073 )
1074
1075 self._credits = query.credits or 0
1076
1077 return self._credits
1078
1079 @property
1080 def credits(self):
1081 # XXX COMPAT
1082 return self.score
1083
1084 def get_commenters(self):
1085 query = self.db.query("SELECT DISTINCT users.id AS id FROM builds_comments \
1086 JOIN users ON builds_comments.user_id = users.id \
1087 WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \
1088 AND users.activated = 'Y' ORDER BY users.id", self.id)
1089 
1090 return [users.User(self.pakfire, u.id) for u in query]
1091
1092 def send_comment_message(self, comment_id):
1093 comment = self.db.get("SELECT * FROM builds_comments WHERE id = %s",
1094 comment_id)
1095
1096 assert comment
1097 assert comment.build_id == self.id
1098
1099 # Get user who wrote the comment.
1100 user = self.pakfire.users.get_by_id(comment.user_id)
1101
1102 format = {
1103 "build_name" : self.name,
1104 "user_name" : user.realname,
1105 }
1106
1107 # XXX create beautiful message
1108
1109 self.pakfire.messages.send_to_all(self.message_recipients,
1110 N_("%(user_name)s commented on %(build_name)s"),
1111 comment.text, format)
1112
1113 ## Logging stuff
1114
1115 def get_log(self, comments=True, repo=True, limit=None):
1116 entries = []
1117
1118 # Created entry.
1119 created_entry = logs.CreatedLogEntry(self.pakfire, self)
1120 entries.append(created_entry)
1121
1122 if comments:
1123 entries += self.get_comments(limit=limit)
1124
1125 if repo:
1126 entries += self.get_repo_moves(limit=limit)
1127
1128 # Sort all entries in chronological order.
1129 entries.sort()
1130
1131 if limit:
1132 entries = entries[:limit]
1133
1134 return entries
1135
1136 ## Watchers stuff
1137
1138 def get_watchers(self):
1139 query = self.db.query("SELECT DISTINCT users.id AS id FROM builds_watchers \
1140 JOIN users ON builds_watchers.user_id = users.id \
1141 WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \
1142 AND users.activated = 'Y' ORDER BY users.id", self.id)
1143
1144 return [users.User(self.pakfire, u.id) for u in query]
1145
1146 def add_watcher(self, user):
1147 # Don't add a user twice.
1148 if user in self.get_watchers():
1149 return
1150
1151 self.db.execute("INSERT INTO builds_watchers(build_id, user_id) \
1152 VALUES(%s, %s)", self.id, user.id)
1153
1154 @property
1155 def message_recipients(self):
1156 ret = []
1157
1158 for watcher in self.get_watchers():
1159 ret.append("%s <%s>" % (watcher.realname, watcher.email))
1160
1161 return ret
1162
1163 @property
1164 def update(self):
1165 if self._update is None:
1166 update = self.db.get("SELECT update_id AS id FROM updates_builds \
1167 WHERE build_id = %s", self.id)
1168
1169 if update:
1170 self._update = updates.Update(self.pakfire, update.id)
1171
1172 return self._update
1173
1174 @property
1175 def repo(self):
1176 if self._repo is None:
1177 repo = self.db.get("SELECT repo_id AS id FROM repositories_builds \
1178 WHERE build_id = %s", self.id)
1179
1180 if repo:
1181 self._repo = repository.Repository(self.pakfire, repo.id)
1182
1183 return self._repo
1184
1185 def get_repo_moves(self, limit=None):
1186 query = "SELECT * FROM repositories_history \
1187 WHERE build_id = %s ORDER BY time ASC"
1188
1189 actions = []
1190 for action in self.db.query(query, self.id):
1191 action = logs.RepositoryLogEntry(self.pakfire, action)
1192 actions.append(action)
1193
1194 return actions
1195
1196 @property
1197 def is_loose(self):
1198 if self.repo:
1199 return False
1200
1201 return True
1202
1203 @property
1204 def repo_time(self):
1205 repo = self.db.get("SELECT time_added FROM repositories_builds \
1206 WHERE build_id = %s", self.id)
1207
1208 if repo:
1209 return repo.time_added
1210
1211 def get_auto_move(self):
1212 return self.data.auto_move == "Y"
1213
1214 def set_auto_move(self, state):
1215 if state:
1216 state = "Y"
1217 else:
1218 state = "N"
1219
1220 self.db.execute("UPDATE builds SET auto_move = %s WHERE id = %s", state, self.id)
1221 if self._data:
1222 self._data["auto_move"] = state
1223
1224 auto_move = property(get_auto_move, set_auto_move)
1225
1226 @property
1227 def can_move_forward(self):
1228 if not self.repo:
1229 return False
1230
1231 # If there is no next repository, we cannot move anything.
1232 next_repo = self.repo.next()
1233
1234 if not next_repo:
1235 return False
1236
1237 # If the needed amount of score is reached, we can move forward.
1238 if self.score >= next_repo.score_needed:
1239 return True
1240
1241 # If the repository does not require a minimal time,
1242 # we can move forward immediately.
1243 if not self.repo.time_min:
1244 return True
1245
1246 query = self.db.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
1247 WHERE build_id = %s", self.id)
1248 duration = query.duration
1249
1250 if duration >= self.repo.time_min:
1251 return True
1252
1253 return False
1254
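# Example (sketch): a periodic sweep that promotes builds which have either
# collected enough score or waited long enough in their current repository.
# "backend" is assumed to be an initialized application instance; next(),
# rem_build() and add_build() are the Repository methods used elsewhere here:
#
#   for build in backend.builds.get_all(limit=None):
#       if build.auto_move and build.can_move_forward:
#           next_repo = build.repo.next()
#           build.repo.rem_build(build)
#           next_repo.add_build(build)
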
1255 ## Bugs
1256
1257 def get_bug_ids(self):
1258 query = self.db.query("SELECT bug_id FROM builds_bugs \
1259 WHERE build_id = %s", self.id)
1260
1261 return [b.bug_id for b in query]
1262
1263 def add_bug(self, bug_id, user=None, log=True):
1264 # Check if this bug is already in the list of bugs.
1265 if bug_id in self.get_bug_ids():
1266 return
1267
1268 self.db.execute("INSERT INTO builds_bugs(build_id, bug_id) \
1269 VALUES(%s, %s)", self.id, bug_id)
1270
1271 # Log the event.
1272 if log:
1273 self.log("bug_added", user=user, bug_id=bug_id)
1274
1275 def rem_bug(self, bug_id, user=None, log=True):
1276 self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \
1277 bug_id = %s", self.id, bug_id)
1278
1279 # Log the event.
1280 if log:
1281 self.log("bug_removed", user=user, bug_id=bug_id)
1282
1283 def search_for_bugs(self):
1284 if not self.commit:
1285 return
1286
1287 pattern = re.compile(r"(bug\s?|#)(\d+)")
1288
1289 for txt in (self.commit.subject, self.commit.message):
1290 for bug in re.finditer(pattern, txt):
1291 try:
1292 bugid = int(bug.group(2))
1293 except ValueError:
1294 continue
1295
1296 # Check if a bug with the given ID exists in BZ.
1297 bug = self.pakfire.bugzilla.get_bug(bugid)
1298 if not bug:
1299 continue
1300
1301 self.add_bug(bugid)
1302
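# Example: the pattern above matches both "#12345" and "bug 12345" style
# references, so a commit message like
#
#   "foo: Fix crash on start (#12345, bug 6789)"
#
# associates bug IDs 12345 and 6789 with this build (provided those bugs
# actually exist in Bugzilla).
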
1303 def get_bugs(self):
1304 bugs = []
1305 for bug_id in self.get_bug_ids():
1306 bug = self.pakfire.bugzilla.get_bug(bug_id)
1307 if not bug:
1308 continue
1309
1310 bugs.append(bug)
1311
1312 return bugs
1313
1314 def _update_bugs_helper(self, repo):
1315 """
1316 This function takes a new status and generates messages that
1317 are appended to all bugs.
1318 """
1319 try:
1320 kwargs = BUG_MESSAGES[repo.type].copy()
1321 except KeyError:
1322 return
1323
1324 baseurl = self.pakfire.settings.get("baseurl", "")
1325 args = {
1326 "build_url" : "%s/build/%s" % (baseurl, self.uuid),
1327 "distro_name" : self.distro.name,
1328 "package_name" : self.name,
1329 "repo_name" : repo.name,
1330 }
1331 kwargs["comment"] = kwargs["comment"] % args
1332
1333 self.update_bugs(**kwargs)
1334
1335 def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
1336 self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
1337 VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment)
1338
1339 def update_bugs(self, status, resolution=None, comment=None):
1340 # Update all bugs linked to this build.
1341 for bug_id in self.get_bug_ids():
1342 self._update_bug(bug_id, status=status, resolution=resolution, comment=comment)
1343
1344
1345 class Jobs(base.Object):
1346 def get_by_id(self, id, data=None):
1347 return Job(self.pakfire, id, data)
1348
1349 def get_by_uuid(self, uuid):
1350 job = self.db.get("SELECT id FROM jobs WHERE uuid = %s", uuid)
1351
1352 if job:
1353 return self.get_by_id(job.id)
1354
1355 def get_by_build(self, build_id, build=None, type=None):
1356 """
1357 Get all jobs in the specified build.
1358 """
1359 query = "SELECT * FROM jobs WHERE build_id = %s"
1360 args = [build_id,]
1361
1362 if type:
1363 query += " AND type = %s"
1364 args.append(type)
1365
1366 # Get IDs of all builds in this group.
1367 jobs = []
1368 for job in self.db.query(query, *args):
1369 job = Job(self.pakfire, job.id, job)
1370
1371 # If the Build object was set, we set it so it won't be retrieved
1372 # from the database again.
1373 if build:
1374 job._build = build
1375
1376 jobs.append(job)
1377
1378 # Return sorted list of jobs.
1379 return sorted(jobs)
1380
1381 def get_active(self, host_id=None, builder=None, states=None):
1382 if builder:
1383 host_id = builder.id
1384
1385 if states is None:
1386 states = ["dispatching", "running", "uploading"]
1387
1388 query = "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states))
1389 args = states
1390
1391 if host_id:
1392 query += " AND builder_id = %s" % host_id
1393
1394 query += " ORDER BY \
1395 CASE \
1396 WHEN jobs.state = 'running' THEN 0 \
1397 WHEN jobs.state = 'uploading' THEN 1 \
1398 WHEN jobs.state = 'dispatching' THEN 2 \
1399 WHEN jobs.state = 'pending' THEN 3 \
1400 WHEN jobs.state = 'new' THEN 4 \
1401 END, time_started ASC"
1402
1403 return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]
1404
1405 def get_next_iter(self, *args, **kwargs):
1406 return iter(self.get_next(*args, **kwargs))
1407
1408 def get_next(self, arches=None, builder=None, limit=None, offset=None, type=None,
1409 state=None, states=None, max_tries=None):
1410
1411 if state and states is None:
1412 states = [state,]
1413
1414 query = "SELECT * FROM jobs \
1415 INNER JOIN jobs_queue ON jobs.id = jobs_queue.id"
1416 args = []
1417
1418 if arches:
1419 query += " AND jobs_queue.arch IN (%s)" % ", ".join(["%s"] * len(arches))
1420 args.extend(arches)
1421
1422 if builder:
1423 query += " AND jobs_queue.designated_builder_id = %s"
1424 args.append(builder.id)
1425
1426 if max_tries:
1427 query += " AND jobs.max_tries <= %s"
1428 args.append(max_tries)
1429
1430 if states:
1431 query += " AND jobs.state IN (%s)" % ", ".join(["%s"] * len(states))
1432 args.extend(states)
1433
1434 if type:
1435 query += " AND jobs.type = %s"
1436 args.append(type)
1437
1438 if limit:
1439 query += " LIMIT %s"
1440 args.append(limit)
1441
1442 jobs = []
1443 for row in self.db.query(query, *args):
1444 job = self.pakfire.jobs.get_by_id(row.id, row)
1445 jobs.append(job)
1446
1447 # Reverse the order of the jobs.
1448 jobs.reverse()
1449
1450 return jobs
1451
1452 def get_latest(self, arch=None, builder=None, limit=None, age=None, date=None):
1453 query = "SELECT * FROM jobs"
1454 args = []
1455
1456 where = ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]
1457
1458 if arch:
1459 where.append("arch = %s")
1460 args.append(arch)
1461
1462 if builder:
1463 where.append("builder_id = %s")
1464 args.append(builder.id)
1465
1466 if date:
1467 try:
1468 year, month, day = date.split("-", 2)
1469 date = datetime.date(int(year), int(month), int(day))
1470 except ValueError:
1471 pass
1472 else:
1473 where.append("(DATE(time_created) = %s OR \
1474 DATE(time_started) = %s OR DATE(time_finished) = %s)")
1475 args += (date, date, date)
1476
1477 if age:
1478 where.append("time_finished >= NOW() - '%s'::interval" % age)
1479
1480 if where:
1481 query += " WHERE %s" % " AND ".join(where)
1482
1483 query += " ORDER BY time_finished DESC"
1484
1485 if limit:
1486 query += " LIMIT %s"
1487 args.append(limit)
1488
1489 return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]
1490
1491 def get_average_build_time(self):
1492 """
1493 Returns the average build time of all finished builds from the
1494 last 3 months.
1495 """
1496 result = self.db.get("SELECT AVG(time_finished - time_started) as average \
1497 FROM jobs WHERE type = 'build' AND state = 'finished' AND \
1498 time_finished >= NOW() - '3 months'::interval")
1499
1500 if result:
1501 return result.average
1502
1503 def count(self, *states):
1504 query = "SELECT COUNT(*) AS count FROM jobs"
1505 args = []
1506
1507 if states:
1508 query += " WHERE state IN %s"
1509 args.append(states)
1510
1511 jobs = self.db.get(query, *args)
1512 if jobs:
1513 return jobs.count
1514
1515 def get_queue_length(self, state=None):
1516 if state:
1517 res = self.db.get("SELECT COUNT(*) AS count FROM jobs_queue \
1518 LEFT JOIN jobs ON jobs_queue.id = jobs.id WHERE state = %s", state)
1519 else:
1520 res = self.db.get("SELECT COUNT(*) AS count FROM jobs_queue")
1521
1522 if res:
1523 return res.count
1524
1525 return 0
1526
1527 def get_avg_wait_time(self):
1528 res = self.db.get("SELECT AVG(time_waiting) AS time_waiting FROM jobs_waiting")
1529
1530 if res and res.time_waiting:
1531 try:
1532 return int(res.time_waiting)
1533 except ValueError:
1534 return 0
1535
1536 return 0
1537
1538 def get_state_stats(self):
1539 res = self.db.query("SELECT state, COUNT(*) AS count FROM jobs GROUP BY state")
1540
1541 if not res:
1542 return {}
1543
1544 ret = {
1545 "new" : 0,
1546 "pending" : 0,
1547 "running" : 0,
1548 "finished" : 0,
1549 "dispatching" : 0,
1550 "uploading" : 0,
1551 "failed" : 0,
1552 "aborted" : 0,
1553 "temporary_failed" : 0,
1554 "dependency_error" : 0,
1555 "download_error" : 0,
1556 "deleted" : 0,
1557 }
1558 for row in res:
1559 ret[row.state] = int(row.count)
1560
1561 return ret
1562
1563 def get_build_durations(self):
1564 res = self.db.query("SELECT platform, MIN(duration) AS minimum, \
1565 MAX(duration) AS maximum, AVG(duration) AS average, \
1566 STDDEV_POP(duration) AS stddev \
1567 FROM builds_times GROUP BY platform \
1568 UNION SELECT 'all', MIN(duration) AS minimum, \
1569 MAX(duration) AS maximum, AVG(duration) AS average, \
1570 STDDEV_POP(duration) AS stddev \
1571 FROM builds_times")
1572
1573 ret = {}
1574 for row in res:
1575 ret[row.platform] = {
1576 "minimum" : int(row.minimum),
1577 "maximum" : int(row.maximum),
1578 "average" : int(row.average),
1579 "stddev" : int(row.stddev),
1580 }
1581
1582 return ret
1583
1584
1585 class Job(base.Object):
1586 def __init__(self, pakfire, id, data=None):
1587 base.Object.__init__(self, pakfire)
1588
1589 # The ID of this Job object.
1590 self.id = id
1591
1592 # Cache the data of this object.
1593 self._data = data
1594 self._build = None
1595 self._builder = None
1596 self._packages = None
1597 self._logfiles = None
1598
1599 def __str__(self):
1600 return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.name)
1601
1602 def __cmp__(self, other):
1603 if self.type == "build" and other.type == "test":
1604 return -1
1605 elif self.type == "test" and other.type == "build":
1606 return 1
1607
1608 if self.build_id == other.build_id:
1609 return cmp(self.arch, other.arch)
1610
1611 ret = cmp(self.pkg, other.pkg)
1612
1613 if not ret:
1614 ret = cmp(self.time_created, other.time_created)
1615
1616 return ret
1617
1618 @property
1619 def distro(self):
1620 assert self.build.distro
1621 return self.build.distro
1622
1623 @classmethod
1624 def create(cls, pakfire, build, arch, type="build"):
1625 id = pakfire.db.execute("INSERT INTO jobs(uuid, type, build_id, arch, time_created) \
1626 VALUES(%s, %s, %s, %s, NOW())", "%s" % uuid.uuid4(), type, build.id, arch)
1627
1628 job = Job(pakfire, id)
1629 job.log("created")
1630
1631 # Set cache for Build object.
1632 job._build = build
1633
1634 # Jobs start out in the "new" state and wait to be checked
1635 # for dependencies. Packages that have no build dependencies
1636 # can be forwarded directly to the "pending" state.
1637 if not job.pkg.requires:
1638 job.state = "pending"
1639
1640 return job
1641
1642 def delete(self):
1643 self.__delete_buildroots()
1644 self.__delete_history()
1645 self.__delete_packages()
1646 self.__delete_logfiles()
1647
1648 # Delete the job itself.
1649 self.db.execute("DELETE FROM jobs WHERE id = %s", self.id)
1650
1651 def __delete_buildroots(self):
1652 """
1653 Removes all buildroots.
1654 """
1655 self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)
1656
1657 def __delete_history(self):
1658 """
1659 Removes all references in the history to this build job.
1660 """
1661 self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id)
1662
1663 def __delete_packages(self):
1664 """
1665 Deletes all uploaded files from the job.
1666 """
1667 for pkg in self.packages:
1668 pkg.delete()
1669
1670 self.db.execute("DELETE FROM jobs_packages WHERE job_id = %s", self.id)
1671
1672 def __delete_logfiles(self):
1673 for logfile in self.logfiles:
1674 self.db.execute("INSERT INTO queue_delete(path) VALUES(%s)", logfile.path)
1675
1676 def reset(self, user=None):
1677 self.__delete_buildroots()
1678 self.__delete_packages()
1679 self.__delete_history()
1680 self.__delete_logfiles()
1681
1682 self.state = "new"
1683 self.log("reset", user=user)
1684
1685 @property
1686 def data(self):
1687 if self._data is None:
1688 self._data = self.db.get("SELECT * FROM jobs WHERE id = %s", self.id)
1689 assert self._data
1690
1691 return self._data
1692
1693 ## Logging stuff
1694
1695 def log(self, action, user=None, state=None, builder=None, test_job=None):
1696 user_id = None
1697 if user:
1698 user_id = user.id
1699
1700 builder_id = None
1701 if builder:
1702 builder_id = builder.id
1703
1704 test_job_id = None
1705 if test_job:
1706 test_job_id = test_job.id
1707
1708 self.db.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
1709 time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
1710 self.id, action, state, user_id, builder_id, test_job_id)
1711
1712 def get_log(self, limit=None, offset=None, user=None):
1713 query = "SELECT * FROM jobs_history"
1714
1715 conditions = ["job_id = %s",]
1716 args = [self.id,]
1717
1718 if user:
1719 conditions.append("user_id = %s")
1720 args.append(user.id)
1721
1722 if conditions:
1723 query += " WHERE %s" % " AND ".join(conditions)
1724
1725 query += " ORDER BY time DESC"
1726
1727 if limit:
1728 if offset:
1729 query += " LIMIT %s,%s"
1730 args += [offset, limit,]
1731 else:
1732 query += " LIMIT %s"
1733 args += [limit,]
1734
1735 entries = []
1736 for entry in self.db.query(query, *args):
1737 entry = logs.JobLogEntry(self.pakfire, entry)
1738 entries.append(entry)
1739
1740 return entries
1741
1742 @property
1743 def uuid(self):
1744 return self.data.uuid
1745
1746 @property
1747 def type(self):
1748 return self.data.type
1749
1750 @property
1751 def build_id(self):
1752 return self.data.build_id
1753
1754 @property
1755 def build(self):
1756 if self._build is None:
1757 self._build = self.pakfire.builds.get_by_id(self.build_id)
1758 assert self._build
1759
1760 return self._build
1761
1762 @property
1763 def related_jobs(self):
1764 ret = []
1765
1766 for job in self.build.jobs:
1767 if job == self:
1768 continue
1769
1770 ret.append(job)
1771
1772 return ret
1773
1774 @property
1775 def pkg(self):
1776 return self.build.pkg
1777
1778 @property
1779 def name(self):
1780 return "%s-%s.%s" % (self.pkg.name, self.pkg.friendly_version, self.arch)
1781
1782 @property
1783 def size(self):
1784 return sum((p.size for p in self.packages))
1785
1786 def is_running(self):
1787 """
1788 Returns True if job is in a running state.
1789 """
1790 return self.state in ("pending", "dispatching", "running", "uploading")
1791
1792 def get_state(self):
1793 return self.data.state
1794
1795 def set_state(self, state, user=None, log=True):
1796 # Nothing to do if the state remains.
1797 if not self.state == state:
1798 self.db.execute("UPDATE jobs SET state = %s WHERE id = %s", state, self.id)
1799
1800 # Log the event.
1801 if log and not state == "new":
1802 self.log("state_change", state=state, user=user)
1803
1804 # Update cache.
1805 if self._data:
1806 self._data["state"] = state
1807
1808 # Always clear the message when the status is changed.
1809 self.update_message(None)
1810
1811 # Update some more information.
1812 if state == "dispatching":
1813 # Set start time.
1814 self.db.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
1815 WHERE id = %s", self.id)
1816
1817 elif state == "pending":
1818 self.db.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
1819 time_finished = NULL WHERE id = %s", self.id)
1820
1821 elif state in ("aborted", "dependency_error", "finished", "failed"):
1822 # Set finish time and reset the builder.
1823 self.db.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self.id)
1824
1825 # Send messages to the user.
1826 if state == "finished":
1827 self.send_finished_message()
1828
1829 elif state == "failed":
1830 # Remove all package files if a job is set to failed state.
1831 self.__delete_packages()
1832
1833 self.send_failed_message()
1834
1835 # Automatically update the state of the build (not on test builds).
1836 if self.type == "build":
1837 self.build.auto_update_state()
1838
1839 state = property(get_state, set_state)
1840
1841 @property
1842 def message(self):
1843 return self.data.message
1844
1845 def update_message(self, msg):
1846 self.db.execute("UPDATE jobs SET message = %s WHERE id = %s",
1847 msg, self.id)
1848
1849 if self._data:
1850 self._data["message"] = msg
1851
1852 @property
1853 def builder_id(self):
1854 return self.data.builder_id
1855
1856 def get_builder(self):
1857 if not self.builder_id:
1858 return
1859
1860 if self._builder is None:
1861 self._builder = builders.Builder(self.pakfire, self.builder_id)
1862 assert self._builder
1863
1864 return self._builder
1865
1866 def set_builder(self, builder, user=None):
1867 self.db.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
1868 builder.id, self.id)
1869
1870 # Update cache.
1871 if self._data:
1872 self._data["builder_id"] = builder.id
1873
1874 self._builder = builder
1875
1876 # Log the event.
1877 if user:
1878 self.log("builder_assigned", builder=builder, user=user)
1879
1880 builder = property(get_builder, set_builder)
1881
1882 @property
1883 def arch(self):
1884 return self.data.arch
1885
1886 @lazy_property
1887 def _arch(self):
1888 return self.backend.arches.get_by_name(self.arch)
1889
1890 @property
1891 def duration(self):
1892 if not self.time_started:
1893 return 0
1894
1895 if self.time_finished:
1896 delta = self.time_finished - self.time_started
1897 else:
1898 delta = datetime.datetime.utcnow() - self.time_started
1899
1900 return delta.total_seconds()
1901
1902 @property
1903 def time_created(self):
1904 return self.data.time_created
1905
1906 @property
1907 def time_started(self):
1908 return self.data.time_started
1909
1910 @property
1911 def time_finished(self):
1912 return self.data.time_finished
1913
1914 @property
1915 def expected_runtime(self):
1916 """
1917 Returns the estimated time and stddev this job takes to finish.
1918 """
1919 # Get the average build time.
1920 build_times = self.pakfire.builds.get_build_times_by_arch(self.arch,
1921 name=self.pkg.name)
1922
1923 # If there is no statistical data, we cannot estimate anything.
1924 if not build_times:
1925 return None, None
1926
1927 return build_times.average, build_times.stddev
1928
1929 @property
1930 def eta(self):
1931 expected_runtime, stddev = self.expected_runtime
1932
1933 if expected_runtime:
1934 return expected_runtime - int(self.duration), stddev
1935
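# Example: with an average historic build time of 3600s, a stddev of 300s and
# 1200s already elapsed, expected_runtime returns (3600, 300) and eta yields
# (2400, 300), i.e. roughly 40 more minutes with about 5 minutes uncertainty.
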
1936 @property
1937 def tries(self):
1938 return self.data.tries
1939
1940 @property
1941 def packages(self):
1942 if self._packages is None:
1943 self._packages = []
1944
1945 query = "SELECT pkg_id AS id FROM jobs_packages \
1946 JOIN packages ON packages.id = jobs_packages.pkg_id \
1947 WHERE jobs_packages.job_id = %s ORDER BY packages.name"
1948
1949 for pkg in self.db.query(query, self.id):
1950 pkg = packages.Package(self.pakfire, pkg.id)
1951 pkg._job = self
1952
1953 self._packages.append(pkg)
1954
1955 return self._packages
1956
1957 def get_pkg_by_uuid(self, uuid):
1958 pkg = self.db.get("SELECT packages.id FROM packages \
1959 JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
1960 WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
1961 self.id, uuid)
1962
1963 if not pkg:
1964 return
1965
1966 pkg = packages.Package(self.pakfire, pkg.id)
1967 pkg._job = self
1968
1969 return pkg
1970
1971 @property
1972 def logfiles(self):
1973 if self._logfiles is None:
1974 self._logfiles = []
1975
1976 for log in self.db.query("SELECT id FROM logfiles WHERE job_id = %s", self.id):
1977 log = logs.LogFile(self.pakfire, log.id)
1978 log._job = self
1979
1980 self._logfiles.append(log)
1981
1982 return self._logfiles
1983
1984 def add_file(self, filename):
1985 """
1986 Add the specified file to this job.
1987
1988 The file is copied to the right directory by this function.
1989 """
1990 assert os.path.exists(filename)
1991
1992 if filename.endswith(".log"):
1993 self._add_file_log(filename)
1994
1995 elif filename.endswith(".%s" % PACKAGE_EXTENSION):
1996 # It is not allowed to upload packages on test builds.
1997 if self.type == "test":
1998 return
1999
2000 self._add_file_package(filename)
2001
2002 def _add_file_log(self, filename):
2003 """
2004 Attach a log file to this job.
2005 """
2006 target_dirname = os.path.join(self.build.path, "logs")
2007
2008 if self.type == "test":
2009 i = 1
2010 while True:
2011 target_filename = os.path.join(target_dirname,
2012 "test.%s.%s.%s.log" % (self.arch, i, self.tries))
2013
2014 if os.path.exists(target_filename):
2015 i += 1
2016 else:
2017 break
2018 else:
2019 target_filename = os.path.join(target_dirname,
2020 "build.%s.%s.log" % (self.arch, self.tries))
2021
2022 # Make sure the target directory exists.
2023 if not os.path.exists(target_dirname):
2024 os.makedirs(target_dirname)
2025
2026 # Calculate a SHA512 hash from that file.
2027 f = open(filename, "rb")
2028 h = hashlib.sha512()
2029 while True:
2030 buf = f.read(BUFFER_SIZE)
2031 if not buf:
2032 break
2033
2034 h.update(buf)
2035 f.close()
2036
2037 # Copy the file to the final location.
2038 shutil.copy2(filename, target_filename)
2039
2040 # Create an entry in the database.
2041 self.db.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
2042 VALUES(%s, %s, %s, %s)", self.id, os.path.relpath(target_filename, PACKAGES_DIR),
2043 os.path.getsize(target_filename), h.hexdigest())
2044
2045 def _add_file_package(self, filename):
2046 # Open package (creates entry in the database).
2047 pkg = packages.Package.open(self.pakfire, filename)
2048
2049 # Move package to the build directory.
2050 pkg.move(os.path.join(self.build.path, self.arch))
2051
2052 # Attach the package to this job.
2053 self.db.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
2054 self.id, pkg.id)
2055
2056 def get_aborted_state(self):
2057 return self.data.aborted_state
2058
2059 def set_aborted_state(self, state):
2060 self.db.execute("UPDATE jobs SET aborted_state = %s WHERE id = %s",
2061 state, self.id)
2062
2063 if self._data:
2064 self._data["aborted_state"] = state
2065
2066 aborted_state = property(get_aborted_state, set_aborted_state)
2067
2068 @property
2069 def message_recipients(self):
2070 l = []
2071
2072 # Add all people watching the build.
2073 l += self.build.message_recipients
2074
2075 # Add the package maintainer on release builds.
2076 if self.build.type == "release":
2077 maint = self.pkg.maintainer
2078
2079 if isinstance(maint, users.User):
2080 l.append("%s <%s>" % (maint.realname, maint.email))
2081 elif maint:
2082 l.append(maint)
2083
2084 # XXX add committer and commit author.
2085
2086 # Add the owner of the scratch build on scratch builds.
2087 elif self.build.type == "scratch" and self.build.user:
2088 l.append("%s <%s>" % \
2089 (self.build.user.realname, self.build.user.email))
2090
2091 return set(l)
2092
2093 def save_buildroot(self, pkgs):
2094 rows = []
2095
2096 for pkg_name, pkg_uuid in pkgs:
2097 rows.append((self.id, self.tries, pkg_uuid, pkg_name))
2098
2099 # Cleanup old stuff first (for rebuilding packages).
2100 self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s",
2101 self.id, self.tries)
2102
2103 self.db.executemany("INSERT INTO \
2104 jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \
2105 VALUES(%s, %s, %s, %s)", rows)
2106
2107 def has_buildroot(self, tries=None):
2108 if tries is None:
2109 tries = self.tries
2110
2111 res = self.db.get("SELECT COUNT(*) AS num FROM jobs_buildroots \
2112 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
2113 ORDER BY pkg_name", self.id, tries)
2114
2115 if res:
2116 return res.num
2117
2118 return 0
2119
2120 def get_buildroot(self, tries=None):
2121 if tries is None:
2122 tries = self.tries
2123
2124 rows = self.db.query("SELECT * FROM jobs_buildroots \
2125 WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \
2126 ORDER BY pkg_name", self.id, tries)
2127
2128 pkgs = []
2129 for row in rows:
2130 # Search for this package in the packages table.
2131 pkg = self.pakfire.packages.get_by_uuid(row.pkg_uuid)
2132 pkgs.append((row.pkg_name, row.pkg_uuid, pkg))
2133
2134 return pkgs
2135
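# Example (sketch): recording and reading back the buildroot of the current
# try. pkgs is a list of (name, uuid) tuples as reported by the builder; the
# UUIDs below are illustrative only:
#
#   job.save_buildroot([("gcc", "1fc5e8f2-..."), ("glibc", "7a01b3c4-...")])
#   for name, pkg_uuid, pkg in job.get_buildroot():
#       print name, pkg_uuid, pkg   # pkg may be None if the UUID is unknown
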
2136 def send_finished_message(self):
2137 # Send no finished mails for test jobs.
2138 if self.type == "test":
2139 return
2140
2141 logging.debug("Sending finished message for job %s to %s" % \
2142 (self.name, ", ".join(self.message_recipients)))
2143
2144 info = {
2145 "build_name" : self.name,
2146 "build_host" : self.builder.name,
2147 "build_uuid" : self.uuid,
2148 }
2149
2150 self.pakfire.messages.send_to_all(self.message_recipients,
2151 MSG_BUILD_FINISHED_SUBJECT, MSG_BUILD_FINISHED, info)
2152
2153 def send_failed_message(self):
2154 logging.debug("Sending failed message for job %s to %s" % \
2155 (self.name, ", ".join(self.message_recipients)))
2156
2157 build_host = "--"
2158 if self.builder:
2159 build_host = self.builder.name
2160
2161 info = {
2162 "build_name" : self.name,
2163 "build_host" : build_host,
2164 "build_uuid" : self.uuid,
2165 }
2166
2167 self.pakfire.messages.send_to_all(self.message_recipients,
2168 MSG_BUILD_FAILED_SUBJECT, MSG_BUILD_FAILED, info)
2169
2170 def set_start_time(self, start_time):
2171 if start_time is None:
2172 return
2173
2174 self.db.execute("UPDATE jobs SET start_not_before = NOW() + %s \
2175 WHERE id = %s LIMIT 1", start_time, self.id)
2176
2177 def schedule(self, type, start_time=None, user=None):
2178 assert type in ("rebuild", "test")
2179
2180 if type == "rebuild":
2181 if self.state == "finished":
2182 return
2183
2184 self.set_state("new", user=user, log=False)
2185 self.set_start_time(start_time)
2186
2187 # Log the event.
2188 self.log("schedule_rebuild", user=user)
2189
2190 elif type == "test":
2191 if not self.state == "finished":
2192 return
2193
2194 # Create a new job with same build and arch.
2195 job = self.create(self.pakfire, self.build, self.arch, type="test")
2196 job.set_start_time(start_time)
2197
2198 # Log the event.
2199 self.log("schedule_test_job", test_job=job, user=user)
2200
2201 return job
2202
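# Example (sketch): queueing a rebuild of a failed job and a delayed test job
# for a finished one. "admin" is an assumed User object; start_time is passed
# straight into SQL as an interval, so a datetime.timedelta is assumed to work
# here (based on set_start_time() above):
#
#   if job.state == "failed":
#       job.schedule("rebuild", user=admin)
#   elif job.state == "finished":
#       job.schedule("test", start_time=datetime.timedelta(hours=6))
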
2203 def schedule_test(self, start_not_before=None, user=None):
2204 # XXX to be removed
2205 return self.schedule("test", start_time=start_not_before, user=user)
2206
2207 def schedule_rebuild(self, start_not_before=None, user=None):
2208 # XXX to be removed
2209 return self.schedule("rebuild", start_time=start_not_before, user=user)
2210
2211 def get_build_repos(self):
2212 """
2213 Returns a list of all repositories that should be used when
2214 building this job.
2215 """
2216 repo_ids = self.db.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",
2217 self.id)
2218
2219 if not repo_ids:
2220 return self.distro.get_build_repos()
2221
2222 repos = []
2223 for repo in self.distro.repositories:
2224 if repo.id in [r.id for r in repo_ids]:
2225 repos.append(repo)
2226
2227 return repos or self.distro.get_build_repos()
2228
2229 def get_repo_config(self):
2230 """
2231 Get repository configuration file that is sent to the builder.
2232 """
2233 confs = []
2234
2235 for repo in self.get_build_repos():
2236 confs.append(repo.get_conf())
2237
2238 return "\n\n".join(confs)
2239
2240 def get_config(self):
2241 """
2242 Get configuration file that is sent to the builder.
2243 """
2244 confs = []
2245
2246 # Add the distribution configuration.
2247 confs.append(self.distro.get_config())
2248
2249 # Then add all repositories for this build.
2250 confs.append(self.get_repo_config())
2251
2252 return "\n\n".join(confs)
2253
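# Example (sketch): writing the builder-side configuration for this job to a
# file; the section layout itself comes from Distribution.get_config() and
# Repository.get_conf(), which are defined elsewhere:
#
#   with open("/tmp/job-%s.conf" % job.uuid, "w") as f:
#       f.write(job.get_config())
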
2254 def used_by(self):
2255 if not self.packages:
2256 return []
2257
2258 conditions = []
2259 args = []
2260
2261 for pkg in self.packages:
2262 conditions.append(" pkg_uuid = %s")
2263 args.append(pkg.uuid)
2264
2265 query = "SELECT DISTINCT job_id AS id FROM jobs_buildroots"
2266 query += " WHERE %s" % " OR ".join(conditions)
2267
2268 job_ids = self.db.query(query, *args)
2269
2270 print job_ids
2271
2272 def resolvdep(self):
2273 config = pakfire.config.Config(files=["general.conf"])
2274 config.parse(self.get_config())
2275
2276 # The filename of the source file.
2277 filename = os.path.join(PACKAGES_DIR, self.build.pkg.path)
2278 assert os.path.exists(filename), filename
2279
2280 # Create a new pakfire instance with the configuration for
2281 # this build.
2282 p = pakfire.PakfireServer(config=config, arch=self.arch)
2283
2284 # Try to solve the build dependencies.
2285 try:
2286 solver = p.resolvdep(filename)
2287
2288 # Catch dependency errors and log the problem string.
2289 except DependencyError, e:
2290 self.state = "dependency_error"
2291 self.update_message(e)
2292
2293 else:
2294 # If the build dependencies can be resolved, we set the build in
2295 # pending state.
2296 if solver.status is True:
2297 if self.state in ("failed",):
2298 return
2299
2300 self.state = "pending"