13 log
= logging
.getLogger("builds")
21 from .constants
import *
22 from .decorators
import *
class Jobs(base.Object):
    # Collection helper for reading and creating Job rows in the database.

    def _get_job(self, query, *args):
        # Run a query expected to yield a single jobs row, wrapped in Job.
        # NOTE(review): a guard (e.g. "if res:") appears to be missing from
        # this view of the file -- confirm against the original source.
        res = self.db.get(query, *args)
        return Job(self.backend, res.id, data=res)

    def _get_jobs(self, query, *args):
        # Run a query yielding multiple jobs rows, wrapping each in a Job.
        # NOTE(review): the loop header (e.g. "for row in res:") is not
        # visible in this view -- "row" is otherwise unbound; confirm.
        res = self.db.query(query, *args)
        yield Job(self.backend, row.id, data=row)

    def create(self, build, arch, test=False, superseeds=None):
        # Insert a new job row for the given build and architecture;
        # "test" marks test jobs, "superseeds" links a replaced job.
        job = self._get_job("INSERT INTO jobs(uuid, build_id, arch, test) \
            VALUES(%s, %s, %s, %s) RETURNING *", "%s" % uuid.uuid4(), build.id, arch, test)

        # Set cache for Build object.
        # Mark if the new job superseeds some other job
        # NOTE(review): the guard around this assignment (superseeds may be
        # None) and the final return of the new job are not visible in this
        # view of the file -- confirm against the original source.
        superseeds.superseeded_by = job
def get_by_id(self, id):
    """Look up a single job by its numeric database ID."""
    query = "SELECT * FROM jobs WHERE id = %s"
    return self._get_job(query, id)
def get_by_uuid(self, uuid):
    """Look up a single job by its UUID string."""
    query = "SELECT * FROM jobs WHERE uuid = %s"
    return self._get_job(query, uuid)
def get_active(self, limit=None):
    # Jobs that have started but not yet finished, oldest start first.
    # NOTE(review): the final return of "jobs" is not visible in this
    # view of the file -- confirm against the original source.
    jobs = self._get_jobs("SELECT jobs.* FROM jobs \
        WHERE time_started IS NOT NULL AND time_finished IS NULL \
        ORDER BY time_started LIMIT %s", limit)

def get_recently_ended(self, limit=None):
    # Jobs that have finished, most recently finished first.
    # NOTE(review): the final return of "jobs" is not visible in this
    # view of the file -- confirm against the original source.
    jobs = self._get_jobs("SELECT jobs.* FROM jobs \
        WHERE time_finished IS NOT NULL ORDER BY time_finished DESC LIMIT %s", limit)

def restart_failed(self):
    # Select failed build jobs older than 72 hours (on non-broken builds)
    # that are candidates for being restarted.
    # NOTE(review): parts of this SQL statement (the WHERE keyword and the
    # ORDER BY CASE wrapper around the WHEN clauses) are missing from this
    # view of the file, as is the rest of the method body -- confirm.
    jobs = self._get_jobs("SELECT jobs.* FROM jobs \
        JOIN builds ON builds.id = jobs.build_id \
        jobs.type = 'build' AND \
        jobs.state = 'failed' AND \
        NOT builds.state = 'broken' AND \
        jobs.time_finished < NOW() - '72 hours'::interval \
        WHEN jobs.type = 'build' THEN 0 \
        WHEN jobs.type = 'test' THEN 1 \
        builds.priority DESC, jobs.time_created ASC")
class Job(base.DataObject):
    # A single build job (one architecture of one build).

    # NOTE(review): the enclosing "def __repr__(self):" line is missing
    # from this view of the file; this return statement appears to be the
    # body of __repr__.
    return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.name)
def __eq__(self, other):
    """Jobs are equal when they refer to the same database row."""
    if isinstance(other, self.__class__):
        return self.id == other.id

    # Returning NotImplemented (instead of falling off the end with
    # None) lets Python try the reflected comparison on the other
    # operand, which is the documented contract for rich comparisons.
    return NotImplemented

def __ne__(self, other):
    """Keep != consistent with == (Python 2 does not derive __ne__)."""
    res = self.__eq__(other)
    if res is NotImplemented:
        return res

    return not res
def __lt__(self, other):
    # Ordering used when sorting jobs (e.g. for the build queue).
    # NOTE(review): the body of the first inner branch is missing from
    # this view of the file -- confirm against the original source.
    if isinstance(other, self.__class__):
        if not self.test and other.test:

        # Jobs of the same build are ordered by architecture priority.
        if self.build == other.build:
            return arches.priority(self.arch) < arches.priority(other.arch)

        # Otherwise older jobs sort first.
        return self.time_created < other.time_created
# NOTE(review): this region of the file is only partially visible; the
# "def"/decorator header lines of most accessors are missing.  Each
# fragment is kept exactly as found and annotated with its apparent role.

# Fragment of __iter__: all packages produced by this job, by name.
packages = self.backend.packages._get_packages("SELECT packages.* FROM jobs_packages \
    LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
    WHERE jobs_packages.job_id = %s ORDER BY packages.name", self.id)

return iter(packages)

def __nonzero__(self):
    # Python 2 truth value: based on the number of attached packages.
    # NOTE(review): the final return is not visible in this view.
    res = self.db.get("SELECT COUNT(*) AS len FROM jobs_packages \
        WHERE job_id = %s", self.id)

# Fragment of the uuid accessor.
return self.data.uuid

# Fragment of the name accessor: "<pkg>-<version>.<arch>".
return "%s-%s.%s" % (self.pkg.name, self.pkg.friendly_version, self.arch)

# Fragment of the build_id accessor.
return self.data.build_id

# Fragment of the build accessor: resolve the Build object by its id.
return self.backend.builds.get_by_id(self.build_id)

# Fragment of the test accessor.
return self.data.test

def related_jobs(self):
    # NOTE(review): the loop body and the return are not visible here.
    for job in self.build.jobs:

# Fragment of the pkg accessor.
return self.build.pkg

# Fragment of the size accessor: total size of all produced packages.
return sum((p.size for p in self.packages))

"""Returns the rank in the build queue"""
# NOTE(review): the surrounding def and return statements of this rank
# accessor are not visible in this view of the file.
if not self.state == "pending":
res = self.db.get("SELECT rank FROM jobs_queue WHERE job_id = %s", self.id)

# Fragment of the distro accessor.
return self.build.distro
def get_superseeded_by(self):
    """Return the Job that replaced this one, or None if there is none."""
    successor_id = self.data.superseeded_by
    if not successor_id:
        return None

    return self.backend.jobs.get_by_id(successor_id)

def set_superseeded_by(self, superseeded_by):
    """Record which job supersedes this one (must be a Job)."""
    assert isinstance(superseeded_by, self.__class__)
    self._set_attribute("superseeded_by", superseeded_by.id)

superseeded_by = lazy_property(get_superseeded_by, set_superseeded_by)
def start(self, builder):
    """Starts this job on builder"""
    self.builder = builder

    # Start to dispatch the build job
    self.state = "dispatching"

# NOTE(review): the method headers for the state transitions below
# (apparently separate methods that set "running" and "finished") are
# missing from this view of the file -- confirm against the original.
self.state = "running"

# Record the start time and clear any stale finish time.
self.time_started = datetime.datetime.utcnow()
self.time_finished = None

self.state = "finished"

# Record when the job ended.
self.time_finished = datetime.datetime.utcnow()

# Notify subscribers that the job has finished.
self.send_finished_message()

def failed(self, message):
    # Mark the job as failed and store the reason.
    self.state = "failed"
    self.message = message

    # Record when the job ended.
    self.time_finished = datetime.datetime.utcnow()

    # Notify subscribers about the failure.
    self.send_failed_message()
def restart(self, test=None, start_not_before=None):
    # Copy the job and let it build again
    job = self.backend.jobs.create(self.build, self.arch,
        test=test or self.test, superseeds=self)

    # NOTE(review): a guard around this assignment and the final return
    # of the new job are not visible in this view of the file.
    job.start_not_before = start_not_before
# NOTE(review): the "def" header of this delete method is missing from
# this view of the file -- confirm against the original source.
"""Deletes a job from the database"""
# Remove the buildroot
self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)

# Remove the job's history entries.
self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id)

# Delete all packages
# NOTE(review): the loop header over this job's packages is not visible
# in this view ("pkg" is otherwise unbound).
self.db.execute("DELETE FROM jobs_packages \
    WHERE job_id = %s AND pkg_id = %s", self.id, pkg.id)

# Remove all logfiles
for logfile in self.logfiles:
    self.backend.delete_file(os.path.join(PACKAGES_DIR, logfile.path))

self.db.execute("DELETE FROM logfiles WHERE job_id = %s", self.id)

# Delete the job itself.
self.db.execute("DELETE FROM jobs WHERE id = %s", self.id)
def log(self, action, user=None, state=None, builder=None, test_job=None):
    # Append an entry to this job's history table.
    # NOTE(review): the guards around the optional arguments and the
    # "user_id" assignment are missing from this view of the file
    # ("user_id" is otherwise unbound below) -- confirm upstream.
    builder_id = builder.id

    test_job_id = test_job.id

    self.db.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
        time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
        self.id, action, state, user_id, builder_id, test_job_id)
def get_log(self, limit=None, offset=None, user=None):
    # Build and run a filtered query over this job's history entries.
    # NOTE(review): the initialisation of "args" and "entries", the
    # guards around the optional filters, and the final return are not
    # visible in this view of the file -- confirm upstream.
    query = "SELECT * FROM jobs_history"

    conditions = ["job_id = %s",]

    # Optional filter: entries created by a certain user.
    conditions.append("user_id = %s")

    query += " WHERE %s" % " AND ".join(conditions)

    query += " ORDER BY time DESC"

    # Optional pagination.
    query += " LIMIT %s,%s"
    args += [offset, limit,]

    # Wrap each raw row in a JobLogEntry object.
    for entry in self.db.query(query, *args):
        entry = logs.JobLogEntry(self.backend, entry)
        entries.append(entry)
def is_running(self):
    """Returns True if job is in a running state."""
    return self.state in ("pending", "dispatching", "running", "uploading")
# Fragment of get_state (its "def" header is not visible in this view).
return self.data.state

def set_state(self, state):
    self._set_attribute("state", state)

    # Automatically update the state of the build (not on test builds)
    # NOTE(review): the guard implied by the comment above is not
    # visible in this view -- confirm against the original source.
    self.build.auto_update_state()

state = property(get_state, set_state)

def set_message(self, message):
    # NOTE(review): a guard around this string coercion appears to be
    # missing from this view of the file -- confirm upstream.
    message = "%s" % message

    self._set_attribute("message", message)

message = property(lambda s: s.data.message, set_message)
def get_builder(self):
    """Resolve the builder assigned to this job, or None when unset."""
    builder_id = self.data.builder_id
    if not builder_id:
        return None

    return self.backend.builders.get_by_id(builder_id)
def set_builder(self, builder, user=None):
    # Assign the job to a builder and record the event in the history.
    log.info("Builder %s has been assigned to %s" % (builder.name, self.name))

    self._set_attribute("builder_id", builder.id)

    # NOTE(review): some lines between the attribute update and this
    # call are missing from this view of the file -- confirm upstream.
    self.log("builder_assigned", builder=builder, user=user)

builder = lazy_property(get_builder, set_builder)
def candidate_builders(self):
    """Returns all active builders that could build this job"""
    # NOTE(review): a decorator is likely missing above this def -- the
    # sibling method below accesses self.candidate_builders without
    # calling it, which suggests this is a property; confirm upstream.
    builders = self.backend.builders.get_for_arch(self.arch)

    # Remove all builders that are not available
    builders = (b for b in builders if b.enabled and b.is_online())

    # Remove all builders that have too many jobs
    builders = (b for b in builders if not b.too_many_jobs)

    # Sort them by the fastest builder first
    return sorted(builders, key=lambda b: -b.performance_index)

def designated_builder(self):
    """Returns the fastest candidate builder builder"""
    # NOTE(review): likely a property as well; the implicit "return
    # None" path when there is no candidate is not visible in this view.
    if self.candidate_builders:
        return self.candidate_builders[0]
# Fragment of the arch accessor (its header is not visible in this view).
return self.data.arch

# Fragment of the duration accessor.
# NOTE(review): the def header, the body of the first branch, and the
# "else" of the second branch are not visible in this view of the file.
if not self.time_started:

# Finished jobs: duration is the span between start and finish.
if self.time_finished:
    delta = self.time_finished - self.time_started

# Otherwise measure against the current time.
delta = datetime.datetime.utcnow() - self.time_started

return delta.total_seconds()
def time_created(self):
    # Creation timestamp as stored on the backing data row.
    # NOTE(review): a @property decorator may be missing above this def
    # in this view of the file -- confirm against the original source.
    return self.data.time_created

def set_time_started(self, time_started):
    self._set_attribute("time_started", time_started)

# Reads go straight to the data row; writes go through the setter.
time_started = property(lambda s: s.data.time_started, set_time_started)

def set_time_finished(self, time_finished):
    self._set_attribute("time_finished", time_finished)

time_finished = property(lambda s: s.data.time_finished, set_time_finished)

def set_start_not_before(self, start_not_before):
    self._set_attribute("start_not_before", start_not_before)

start_not_before = property(lambda s: s.data.start_not_before, set_start_not_before)
413 def get_pkg_by_uuid(self
, uuid
):
414 pkg
= self
.backend
.packages
._get
_package
("SELECT packages.id FROM packages \
415 JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
416 WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
427 for log
in self
.db
.query("SELECT id FROM logfiles WHERE job_id = %s", self
.id):
428 log
= logs
.LogFile(self
.backend
, log
.id)
def add_file(self, filename):
    """Add the specified file to this job.

    The file is copied to the right directory by this function.
    """
    assert os.path.exists(filename)

    if filename.endswith(".log"):
        self._add_file_log(filename)

    elif filename.endswith(".%s" % PACKAGE_EXTENSION):
        # It is not allowed to upload packages on test builds.
        # NOTE(review): the guard implementing the restriction mentioned
        # above is not visible in this view of the file -- confirm.
        self._add_file_package(filename)
def _add_file_log(self, filename):
    """Attach a log file to this job."""
    # NOTE(review): several statements are missing from this view (the
    # branch structure choosing a name, the counter "i", the hashing
    # loop, and the initialisation of "h").  Kept exactly as found.
    target_dirname = os.path.join(self.build.path, "logs")

    # Filename used for test-job logs ("i" distinguishes repeated runs).
    target_filename = os.path.join(target_dirname,
        "test.%s.%s.%s.log" % (self.arch, i, self.uuid))

    if os.path.exists(target_filename):

    # Filename used for regular build-job logs.
    target_filename = os.path.join(target_dirname,
        "build.%s.%s.log" % (self.arch, self.uuid))

    # Make sure the target directory exists.
    if not os.path.exists(target_dirname):
        os.makedirs(target_dirname)

    # Calculate a SHA512 hash from that file.
    f = open(filename, "rb")
    buf = f.read(BUFFER_SIZE)

    # Copy the file to the final location.
    shutil.copy2(filename, target_filename)

    # Create an entry in the database.
    self.db.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
        VALUES(%s, %s, %s, %s)", self.id, os.path.relpath(target_filename, PACKAGES_DIR),
        os.path.getsize(target_filename), h.hexdigest())
def _add_file_package(self, filename):
    # Open package (creates entry in the database)
    pkg = self.backend.packages.create(filename)

    # Move package to the build directory.
    pkg.move(os.path.join(self.build.path, self.arch))

    # Attach the package to this job.
    # NOTE(review): the argument list of this execute() call (the job id
    # and package id) is cut off in this view of the file -- confirm.
    self.db.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
def get_aborted_state(self):
    """Return the stored aborted_state value from the backing row."""
    row = self.data
    return row.aborted_state

def set_aborted_state(self, state):
    """Persist the aborted_state value for this job."""
    self._set_attribute("aborted_state", state)

aborted_state = property(get_aborted_state, set_aborted_state)
def message_recipients(self):
    # Collect e-mail recipients for notifications about this job.
    # NOTE(review): the initialisation of "l" and the final return are
    # not visible in this view of the file -- confirm upstream.
    # Add all people watching the build.
    l += self.build.message_recipients

    # Add the package maintainer on release builds.
    if self.build.type == "release":
        maint = self.pkg.maintainer

        if isinstance(maint, users.User):
            l.append("%s <%s>" % (maint.realname, maint.email))

        # XXX add committer and commit author.

    # Add the owner of the scratch build on scratch builds.
    elif self.build.type == "scratch" and self.build.user:
        l.append("%s <%s>" % \
            (self.build.user.realname, self.build.user.email))
def save_buildroot(self, pkgs):
    """Replace the recorded buildroot of this job.

    pkgs is an iterable of (package name, package uuid) pairs.
    """
    # Cleanup old stuff first (for rebuilding packages)
    self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)

    # Insert one row per package of the buildroot.
    insert_query = "INSERT INTO jobs_buildroots(job_id, pkg_uuid, pkg_name) \
        VALUES(%s, %s, %s)"
    for name, pkg_uuid in pkgs:
        self.db.execute(insert_query, self.id, name, pkg_uuid)
# Fragment of the buildroot accessor.
# NOTE(review): the def header, the "pkgs" list initialisation, the row
# loop header, and the final return are not visible in this view.
rows = self.db.query("SELECT * FROM jobs_buildroots \
    WHERE jobs_buildroots.job_id = %s ORDER BY pkg_name", self.id)

# Search for this package in the packages table.
pkg = self.backend.packages.get_by_uuid(row.pkg_uuid)

pkgs.append((row.pkg_name, row.pkg_uuid, pkg))
def send_finished_message(self):
    # Send no finished mails for test jobs.
    # NOTE(review): the guard implementing the comment above and the
    # enclosing "info = {...}" dictionary literal are not visible in
    # this view of the file -- confirm against the original source.
    logging.debug("Sending finished message for job %s to %s" % \
        (self.name, ", ".join(self.message_recipients)))

    # Template variables for the notification message.
    "build_name" : self.name,
    "build_host" : self.builder.name,
    "build_uuid" : self.uuid,

    self.backend.messages.send_to_all(self.message_recipients,
        MSG_BUILD_FINISHED_SUBJECT, MSG_BUILD_FINISHED, info)

def send_failed_message(self):
    logging.debug("Sending failed message for job %s to %s" % \
        (self.name, ", ".join(self.message_recipients)))

    # NOTE(review): a guard (the builder may be unset) appears to be
    # missing above this assignment -- confirm upstream.
    build_host = self.builder.name

    # Template variables for the notification message; the enclosing
    # "info = {...}" literal is not visible in this view of the file.
    "build_name" : self.name,
    "build_host" : build_host,
    "build_uuid" : self.uuid,

    self.backend.messages.send_to_all(self.message_recipients,
        MSG_BUILD_FAILED_SUBJECT, MSG_BUILD_FAILED, info)
def get_build_repos(self):
    """Returns a list of all repositories that should be used when"""
    # NOTE(review): the docstring above is truncated in this view, as
    # are the query arguments, the "repos" list initialisation, and the
    # append of matching repositories -- confirm upstream.
    repo_ids = self.db.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",

    # No explicit selection: fall back to the distro's default repos.
    return self.distro.get_build_repos()

    for repo in self.distro.repositories:
        if repo.id in [r.id for r in repo_ids]:

    return repos or self.distro.get_build_repos()

def get_config(self, local=False):
    """Get configuration file that is sent to the builder."""
    # NOTE(review): the initialisation of "confs" and the append of each
    # repository configuration are not visible in this view of the file.
    # Add the distribution configuration.
    confs.append(self.distro.get_config())

    # Then add all repositories for this build.
    for repo in self.get_build_repos():
        conf = repo.get_conf(local=local)

    return "\n\n".join(confs)
def set_dependency_check_succeeded(self, value):
    """Store the dependency-check result and stamp when it ran."""
    checked_at = datetime.datetime.utcnow()
    self._set_attribute("dependency_check_succeeded", value)
    self._set_attribute("dependency_check_at", checked_at)
# Reads come straight from the data row; writes go through the setter so
# the check timestamp is updated alongside the result.
dependency_check_succeeded = property(
    lambda s: s.data.dependency_check_succeeded,
    set_dependency_check_succeeded)
# NOTE(review): the "def" header of this dependency-check method and its
# "try:" statement are missing from this view of the file -- confirm.
log.info("Processing dependencies for %s..." % self)

# Build a pakfire configuration from the general config file plus this
# job's own configuration (with local repositories).
config = pakfire.config.Config(files=["general.conf"])
config.parse(self.get_config(local=True))

# The filename of the source file.
filename = os.path.join(PACKAGES_DIR, self.build.pkg.path)
assert os.path.exists(filename), filename

# Create a new pakfire instance with the configuration for this job.
p = pakfire.PakfireServer(config=config, arch=self.arch)

# Try to solve the build dependencies.
solver = p.resolvdep(filename)

# Catch dependency errors and log the problem string.
# (Python 2 "except X, e" syntax.)
except DependencyError, e:
    self.dependency_check_succeeded = False

# The dependency check has succeeded
self.dependency_check_succeeded = True