13 log
= logging
.getLogger("builds")
21 from .constants
import *
22 from .decorators
import *
# NOTE(review): fragmented extract — statements are split across lines and
# several original lines are missing (gaps in the embedded numbering).
# Code kept verbatim; only comments added.
# Jobs: collection accessor for build jobs, backed by the "jobs" table.
24 class Jobs(base
.Object
):
# Run *query* via self.db.get() and wrap the single row in a Job.
# Original lines 27-28 are missing — presumably a guard for an empty
# result (return None); TODO confirm against the full file.
25 def _get_job(self
, query
, *args
):
26 res
= self
.db
.get(query
, *args
)
29 return Job(self
.backend
, res
.id, data
=res
)
# Generator variant: wraps each row of a multi-row query in a Job.
# The "for row in res:" loop header (orig. lines 33-34) is missing here.
31 def _get_jobs(self
, query
, *args
):
32 res
= self
.db
.query(query
, *args
)
35 yield Job(self
.backend
, row
.id, data
=row
)
# Insert a new job row (PostgreSQL RETURNING *) and wrap it.
# NOTE(review): "%s" % uuid.uuid4() relies on a module-level `uuid`
# import that is outside this view — presumably imported at file top.
37 def create(self
, build
, arch
, test
=False, superseeds
=None):
38 job
= self
._get
_job
("INSERT INTO jobs(uuid, build_id, arch, test) \
39 VALUES(%s, %s, %s, %s) RETURNING *", "%s" % uuid
.uuid4(), build
.id, arch
, test
)
42 # Set cache for Build object.
45 # Mark if the new job superseeds some other job
# Only reached when `superseeds` is truthy — the guard line
# (orig. 46, likely "if superseeds:") is missing from this view.
47 superseeds
.superseeded_by
= job
def get_by_id(self, id):
    """Look up a single job by its numeric database ID.

    Returns the Job, or whatever _get_job yields for no match.
    """
    query = "SELECT * FROM jobs WHERE id = %s"
    return self._get_job(query, id)
def get_by_uuid(self, uuid):
    """Look up a single job by its UUID string.

    Returns the Job, or whatever _get_job yields for no match.
    """
    query = "SELECT * FROM jobs WHERE uuid = %s"
    return self._get_job(query, uuid)
# Jobs that have started but not yet finished, oldest first.
# The return statement (orig. lines 61-62) is missing from this view.
# NOTE(review): "LIMIT %s" with limit=None works on PostgreSQL
# ("LIMIT NULL" = no limit) — presumably intentional.
57 def get_active(self
, limit
=None):
58 jobs
= self
._get
_jobs
("SELECT jobs.* FROM jobs \
59 WHERE time_started IS NOT NULL AND time_finished IS NULL \
60 ORDER BY time_started LIMIT %s", limit
)
# Finished jobs, most recent first. Return (orig. 67-68) missing here.
64 def get_recently_ended(self
, limit
=None):
65 jobs
= self
._get
_jobs
("SELECT jobs.* FROM jobs \
66 WHERE time_finished IS NOT NULL ORDER BY time_finished DESC LIMIT %s", limit
)
# Select failed build jobs older than 72h (skipping broken builds) for
# an automatic restart. Parts of the SQL (WHERE/ORDER BY CASE headers,
# orig. 73, 78-79, 82) and the restart loop are missing from this view.
70 def restart_failed(self
):
71 jobs
= self
._get
_jobs
("SELECT jobs.* FROM jobs \
72 JOIN builds ON builds.id = jobs.build_id \
74 jobs.type = 'build' AND \
75 jobs.state = 'failed' AND \
76 NOT builds.state = 'broken' AND \
77 jobs.time_finished < NOW() - '72 hours'::interval \
80 WHEN jobs.type = 'build' THEN 0 \
81 WHEN jobs.type = 'test' THEN 1 \
83 builds.priority DESC, jobs.time_created ASC")
# Job: one build/test job row, wrapping a database record (DataObject).
90 class Job(base
.DataObject
):
# Body of __repr__ — the "def __repr__(self):" line (orig. 93) is
# missing from this view. Renders e.g. "<Job id=1 pkgname-1.0.arch>".
94 return "<%s id=%s %s>" % (self
.__class
__.__name
__, self
.id, self
.name
)
def __eq__(self, other):
    """Two jobs are equal when they refer to the same database row.

    Comparison with any other type is delegated back to Python via
    NotImplemented (the original implicitly returned None, which made
    ``job == x`` evaluate to None instead of falling back to the
    reflected comparison).
    """
    if isinstance(other, self.__class__):
        return self.id == other.id

    return NotImplemented
# Ordering for the build queue: build jobs sort before test jobs, jobs
# of the same build sort by architecture priority, otherwise by creation
# time. The body of the test-vs-build branch (orig. 103-104) is missing
# from this view — presumably "return True".
100 def __lt__(self
, other
):
101 if isinstance(other
, self
.__class
__):
102 if not self
.test
and other
.test
:
105 if self
.build
== other
.build
:
# `arches` comes from the star-imports at the top of the file.
106 return arches
.priority(self
.arch
) < arches
.priority(other
.arch
)
108 return self
.time_created
< other
.time_created
# Body of __iter__ (def line, orig. 110, missing): iterate over the
# packages that this job produced, ordered by name.
111 packages
= self
.backend
.packages
._get
_packages
("SELECT packages.* FROM jobs_packages \
112 LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
113 WHERE jobs_packages.job_id = %s ORDER BY packages.name", self
.id)
115 return iter(packages
)
# Truthiness = "has at least one package" (Python 2 protocol).
# The final "return res.len > 0"-style line (orig. 123+) is missing.
117 def __nonzero__(self
):
121 res
= self
.db
.get("SELECT COUNT(*) AS len FROM jobs_packages \
122 WHERE job_id = %s", self
.id)
# The following fragments are bodies of property getters whose
# def/@property lines are missing from this view:
# uuid — raw row value.
128 return self
.data
.uuid
# name — "pkgname-version.arch" display string.
132 return "%s-%s.%s" % (self
.pkg
.name
, self
.pkg
.friendly_version
, self
.arch
)
# build_id — raw foreign key.
136 return self
.data
.build_id
# build — resolved Build object.
140 return self
.backend
.builds
.get_by_id(self
.build_id
)
# test — flag marking a test (as opposed to release/build) job.
144 return self
.data
.test
# related_jobs — siblings of this job within the same build; the loop
# body and return (orig. 148-158) are largely missing from this view.
147 def related_jobs(self
):
150 for job
in self
.build
.jobs
:
# pkg — the source package of the parent build.
160 return self
.build
.pkg
# size — total size of all produced packages.
164 return sum((p
.size
for p
in self
.packages
))
169 Returns the rank in the build queue
# rank getter: only meaningful for pending jobs; reads jobs_queue.
171 if not self
.state
== "pending":
174 res
= self
.db
.get("SELECT rank FROM jobs_queue WHERE job_id = %s", self
.id)
# distro — delegated to the parent build.
181 return self
.build
.distro
def get_superseeded_by(self):
    """Resolve the job that superseded this one.

    Returns the successor Job, or None when the column is unset.
    """
    successor_id = self.data.superseeded_by
    if successor_id:
        return self.backend.jobs.get_by_id(successor_id)
# Mark this job as superseded by another Job (stores the successor's id).
# Orig. line 189 is missing from this view — presumably blank.
187 def set_superseeded_by(self
, superseeded_by
):
188 assert isinstance(superseeded_by
, self
.__class
__)
190 self
._set
_attribute
("superseeded_by", superseeded_by
.id)
# lazy_property comes from the .decorators star-import.
192 superseeded_by
= lazy_property(get_superseeded_by
, set_superseeded_by
)
# State-transition helpers. Several def lines are missing from this
# view — the "running"/"finished" fragments below presumably belong to
# separate running()/finished() methods (orig. 203, 209-210 missing).
194 def start(self
, builder
):
196 Starts this job on builder
198 self
.builder
= builder
200 # Start to dispatch the build job
201 self
.state
= "dispatching"
# running(): mark as running and (re)stamp the start time.
204 self
.state
= "running"
207 self
.time_started
= datetime
.datetime
.utcnow()
208 self
.time_finished
= None
# finished(): mark as finished, stamp end time, notify watchers.
211 self
.state
= "finished"
214 self
.time_finished
= datetime
.datetime
.utcnow()
217 self
.send_finished_message()
# failed(): record the failure message and notify watchers.
219 def failed(self
, message
):
220 self
.state
= "failed"
221 self
.message
= message
224 self
.time_finished
= datetime
.datetime
.utcnow()
227 self
.send_failed_message()
# restart(): clone this job (marking the clone as its successor) and
# optionally delay it. The return statement (orig. 236+) is missing.
229 def restart(self
, test
=None, start_not_before
=None):
230 # Copy the job and let it build again
231 job
= self
.backend
.jobs
.create(self
.build
, self
.arch
,
232 test
=test
or self
.test
, superseeds
=self
)
235 job
.start_not_before
= start_not_before
# delete(): remove this job and all dependent rows/files. The def line
# and docstring delimiters (orig. ~239-242) are missing from this view.
241 Deletes a job from the database
243 # Remove the buildroot
244 self
.db
.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self
.id)
# History entries for this job.
247 self
.db
.execute("DELETE FROM jobs_history WHERE job_id = %s", self
.id)
249 # Delete all packages
# Per-package unlink — the surrounding "for pkg in self:" loop header
# (orig. 250) is missing from this view.
251 self
.db
.execute("DELETE FROM jobs_packages \
252 WHERE job_id = %s AND pkg_id = %s", self
.id, pkg
.id)
255 # Remove all logfiles
256 for logfile
in self
.logfiles
:
# PACKAGES_DIR comes from the .constants star-import.
257 self
.backend
.delete_file(os
.path
.join(PACKAGES_DIR
, logfile
.path
))
259 self
.db
.execute("DELETE FROM logfiles WHERE job_id = %s", self
.id)
261 # Delete the job itself.
262 self
.db
.execute("DELETE FROM jobs WHERE id = %s", self
.id)
# log(): append an audit entry to jobs_history. The guards that derive
# user_id/builder_id/test_job_id defaults (orig. 267-272, 274-276) are
# missing from this view — builder_id/test_job_id assignments below are
# presumably inside "if builder:" / "if test_job:" blocks.
266 def log(self
, action
, user
=None, state
=None, builder
=None, test_job
=None):
273 builder_id
= builder
.id
277 test_job_id
= test_job
.id
279 self
.db
.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
280 time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
281 self
.id, action
, state
, user_id
, builder_id
, test_job_id
)
# get_log(): fetch history entries, optionally filtered by user and
# paginated. args/entries initialisation and the guards (orig. 285,
# 288-293, 297-299) plus the final return are missing from this view.
283 def get_log(self
, limit
=None, offset
=None, user
=None):
284 query
= "SELECT * FROM jobs_history"
286 conditions
= ["job_id = %s",]
290 conditions
.append("user_id = %s")
294 query
+= " WHERE %s" % " AND ".join(conditions
)
296 query
+= " ORDER BY time DESC"
# NOTE(review): "LIMIT %s,%s" is MySQL syntax; the rest of this file
# uses PostgreSQL (RETURNING, ::interval). Presumably this should be
# "LIMIT %s OFFSET %s" — verify against the database in use.
300 query
+= " LIMIT %s,%s"
301 args
+= [offset
, limit
,]
307 for entry
in self
.db
.query(query
, *args
):
# `logs` is presumably a sibling module imported at file top.
308 entry
= logs
.JobLogEntry(self
.backend
, entry
)
309 entries
.append(entry
)
# is_running(): docstring delimiters (orig. 314, 316) are missing.
313 def is_running(self
):
315 Returns True if job is in a running state.
317 return self
.state
in ("pending", "dispatching", "running", "uploading")
# Body of get_state (def line, orig. 319, missing): raw row value.
320 return self
.data
.state
# set_state(): persist and cascade to the parent build's state.
322 def set_state(self
, state
):
323 self
._set
_attribute
("state", state
)
325 # Automatically update the state of the build (not on test builds)
# The guard (orig. 326, presumably "if not self.test:") is missing
# from this view; this call is indented under it.
327 self
.build
.auto_update_state()
329 state
= property(get_state
, set_state
)
# set_message(): stringify and persist the status/failure message.
# The guard around the coercion (orig. 332, presumably "if message:")
# is missing from this view.
331 def set_message(self
, message
):
333 message
= "%s" % message
335 self
._set
_attribute
("message", message
)
337 message
= property(lambda s
: s
.data
.message
, set_message
)
def get_builder(self):
    """Return the Builder this job is assigned to, or None if unset."""
    builder_id = self.data.builder_id
    if builder_id:
        return self.backend.builders.get_by_id(builder_id)
# set_builder(): persist the assignment and write an audit log entry.
# Orig. lines 345-347 are missing — presumably blank/comment lines.
343 def set_builder(self
, builder
, user
=None):
344 self
._set
_attribute
("builder_id", builder
.id)
348 self
.log("builder_assigned", builder
=builder
, user
=user
)
350 builder
= lazy_property(get_builder
, set_builder
)
# candidate_builders: presumably a @property (decorator line, orig.
# 352, missing) — designated_builder below accesses it without calling.
353 def candidate_builders(self
):
355 Returns all active builders that could build this job
357 builders
= self
.backend
.builders
.get_for_arch(self
.arch
)
359 # Remove all builders that are not available
360 builders
= (b
for b
in builders
if b
.enabled
and b
.is_online())
362 # Remove all builders that have too many jobs
363 builders
= (b
for b
in builders
if not b
.too_many_jobs
)
365 # Sort them by the fastest builder first
366 return sorted(builders
, key
=lambda b
: -b
.performance_index
)
# NOTE(review): candidate_builders is evaluated twice here (test and
# index) — each evaluation re-queries builders. Caching it in a local
# would halve the work; left as-is since surrounding lines are missing.
369 def designated_builder(self
):
371 Returns the fastest candidate builder builder
373 if self
.candidate_builders
:
374 return self
.candidate_builders
[0]
# Body of the arch property getter (def line missing).
378 return self
.data
.arch
# duration getter: None-ish before start; running jobs measure against
# utcnow(). Branch headers/else (orig. 383-384, 387) missing here.
382 if not self
.time_started
:
385 if self
.time_finished
:
386 delta
= self
.time_finished
- self
.time_started
388 delta
= datetime
.datetime
.utcnow() - self
.time_started
390 return delta
.total_seconds()
# time_created: raw row value; the @property decorator line (orig. 392)
# is missing from this view.
393 def time_created(self
):
394 return self
.data
.time_created
def get_time_started(self):
    # Read-through to the raw database row.
    return self.data.time_started

def set_time_started(self, time_started):
    """Persist the timestamp at which this job was started."""
    self._set_attribute("time_started", time_started)

time_started = property(get_time_started, set_time_started)
def get_time_finished(self):
    # Read-through to the raw database row.
    return self.data.time_finished

def set_time_finished(self, time_finished):
    """Persist the timestamp at which this job finished."""
    self._set_attribute("time_finished", time_finished)

time_finished = property(get_time_finished, set_time_finished)
def get_start_not_before(self):
    # Read-through to the raw database row.
    return self.data.start_not_before

def set_start_not_before(self, start_not_before):
    """Persist the earliest time this job may be dispatched."""
    self._set_attribute("start_not_before", start_not_before)

start_not_before = property(get_start_not_before, set_start_not_before)
# get_pkg_by_uuid(): find one of this job's packages by UUID. The query
# arguments and the rest of the method (orig. 415+) are missing here.
411 def get_pkg_by_uuid(self
, uuid
):
412 pkg
= self
.backend
.packages
._get
_package
("SELECT packages.id FROM packages \
413 JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
414 WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
# Interior of the logfiles accessor (def/@property, accumulator and
# return, orig. 417-424 and 428-430, are missing from this view).
# NOTE(review): the loop variable `log` shadows the module-level logger.
425 for log
in self
.db
.query("SELECT id FROM logfiles WHERE job_id = %s", self
.id):
426 log
= logs
.LogFile(self
.backend
, log
.id)
# add_file(): dispatch an uploaded file to the matching handler by
# extension. Docstring delimiters and the test-build guard for package
# uploads (orig. 446-448) are missing from this view.
433 def add_file(self
, filename
):
435 Add the specified file to this job.
437 The file is copied to the right directory by this function.
# NOTE(review): assert is stripped under -O; an explicit raise would be
# safer for validating an upload path — left as-is (fragmented view).
439 assert os
.path
.exists(filename
)
441 if filename
.endswith(".log"):
442 self
._add
_file
_log
(filename
)
# PACKAGE_EXTENSION comes from the .constants star-import.
444 elif filename
.endswith(".%s" % PACKAGE_EXTENSION
):
445 # It is not allowed to upload packages on test builds.
449 self
._add
_file
_package
(filename
)
# _add_file_log(): copy a log into the build's logs/ directory under a
# unique name, hash it, and register it in the logfiles table. Several
# interior lines are missing (test-vs-build branch headers, the hash
# read loop, and presumably f.close()/h init around orig. 477-485).
451 def _add_file_log(self
, filename
):
453 Attach a log file to this job.
455 target_dirname
= os
.path
.join(self
.build
.path
, "logs")
# Test jobs get an indexed name; `i` is presumably a retry counter
# incremented in the missing loop around orig. 458-465.
460 target_filename
= os
.path
.join(target_dirname
,
461 "test.%s.%s.%s.log" % (self
.arch
, i
, self
.uuid
))
463 if os
.path
.exists(target_filename
):
468 target_filename
= os
.path
.join(target_dirname
,
469 "build.%s.%s.log" % (self
.arch
, self
.uuid
))
471 # Make sure the target directory exists.
472 if not os
.path
.exists(target_dirname
):
473 os
.makedirs(target_dirname
)
475 # Calculate a SHA512 hash from that file.
# NOTE(review): no `with`/close visible for this handle — the close is
# presumably in the missing lines (orig. 480-485); confirm.
476 f
= open(filename
, "rb")
479 buf
= f
.read(BUFFER_SIZE
)
486 # Copy the file to the final location.
487 shutil
.copy2(filename
, target_filename
)
489 # Create an entry in the database.
490 self
.db
.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \
491 VALUES(%s, %s, %s, %s)", self
.id, os
.path
.relpath(target_filename
, PACKAGES_DIR
),
492 os
.path
.getsize(target_filename
), h
.hexdigest())
# _add_file_package(): register an uploaded package, move it into the
# build's per-arch directory, and link it to this job. The INSERT's
# argument list (orig. 503-504, presumably "self.id, pkg.id)") is
# missing from this view.
494 def _add_file_package(self
, filename
):
495 # Open package (creates entry in the database)
496 pkg
= self
.backend
.packages
.create(filename
)
498 # Move package to the build directory.
499 pkg
.move(os
.path
.join(self
.build
.path
, self
.arch
))
501 # Attach the package to this job.
502 self
.db
.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
def get_aborted_state(self):
    """Return the state this job was in when it got aborted."""
    previous_state = self.data.aborted_state
    return previous_state

def set_aborted_state(self, state):
    """Record the state this job had at the moment it was aborted."""
    self._set_attribute("aborted_state", state)

aborted_state = property(get_aborted_state, set_aborted_state)
# message_recipients: addresses to notify about this job. The list
# initialisation (orig. ~516, "l = []"), the @property/lazy_property
# decorator and the final return (orig. 535+) are missing here.
514 def message_recipients(self
):
517 # Add all people watching the build.
518 l
+= self
.build
.message_recipients
520 # Add the package maintainer on release builds.
521 if self
.build
.type == "release":
522 maint
= self
.pkg
.maintainer
524 if isinstance(maint
, users
.User
):
# Formats "Real Name <email>"; `users` is presumably a sibling module.
525 l
.append("%s <%s>" % (maint
.realname
, maint
.email
))
529 # XXX add committer and commit author.
531 # Add the owner of the scratch build on scratch builds.
532 elif self
.build
.type == "scratch" and self
.build
.user
:
533 l
.append("%s <%s>" % \
534 (self
.build
.user
.realname
, self
.build
.user
.email
))
# save_buildroot(): replace the recorded buildroot with *pkgs*, an
# iterable of (name, uuid) pairs.
538 def save_buildroot(self
, pkgs
):
539 # Cleanup old stuff first (for rebuilding packages)
540 self
.db
.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self
.id)
542 for pkg_name
, pkg_uuid
in pkgs
:
543 self
.db
.execute("INSERT INTO jobs_buildroots(job_id, pkg_uuid, pkg_name) \
544 VALUES(%s, %s, %s)", self
.id, pkg_name
, pkg_uuid
)
# Interior of the buildroot accessor (def/@property lines, the
# "pkgs = []" init, the row loop header and the return — orig.
# 545-547, 550-552, 556-557 — are missing from this view).
548 rows
= self
.db
.query("SELECT * FROM jobs_buildroots \
549 WHERE jobs_buildroots.job_id = %s ORDER BY pkg_name", self
.id)
553 # Search for this package in the packages table.
554 pkg
= self
.backend
.packages
.get_by_uuid(row
.pkg_uuid
)
555 pkgs
.append((row
.pkg_name
, row
.pkg_uuid
, pkg
))
# Notification mails. The guards, the "info = {" dict openers/closers
# and template keys (orig. 561-563, 566-567, 571-572, 579-584,
# 588-589) are missing from this view.
# NOTE(review): these use the root `logging` module directly although
# the file creates log = logging.getLogger("builds") at the top —
# presumably an inconsistency worth unifying.
559 def send_finished_message(self
):
560 # Send no finished mails for test jobs.
564 logging
.debug("Sending finished message for job %s to %s" % \
565 (self
.name
, ", ".join(self
.message_recipients
)))
568 "build_name" : self
.name
,
569 "build_host" : self
.builder
.name
,
570 "build_uuid" : self
.uuid
,
# MSG_BUILD_FINISHED* come from the .constants star-import.
573 self
.backend
.messages
.send_to_all(self
.message_recipients
,
574 MSG_BUILD_FINISHED_SUBJECT
, MSG_BUILD_FINISHED
, info
)
576 def send_failed_message(self
):
577 logging
.debug("Sending failed message for job %s to %s" % \
578 (self
.name
, ", ".join(self
.message_recipients
)))
# build_host presumably defaults to None in a missing guard (orig.
# 579-581) when no builder is assigned — confirm.
582 build_host
= self
.builder
.name
585 "build_name" : self
.name
,
586 "build_host" : build_host
,
587 "build_uuid" : self
.uuid
,
590 self
.backend
.messages
.send_to_all(self
.message_recipients
,
591 MSG_BUILD_FAILED_SUBJECT
, MSG_BUILD_FAILED
, info
)
# get_build_repos(): repositories to enable when building this job —
# either the per-job selection from jobs_repos or the distro default.
# Query args, the empty-result guard, the repos accumulator and the
# match branch body (orig. 599-601, 603-604, 607-608) are missing here.
593 def get_build_repos(self
):
595 Returns a list of all repositories that should be used when
598 repo_ids
= self
.db
.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s",
602 return self
.distro
.get_build_repos()
605 for repo
in self
.distro
.repositories
:
# NOTE(review): the id list is rebuilt on every iteration — hoisting
# a set of ids before the loop would avoid O(n*m) work.
606 if repo
.id in [r
.id for r
in repo_ids
]:
609 return repos
or self
.distro
.get_build_repos()
# get_config(): assemble the pakfire config sent to the builder from
# the distro config plus one section per enabled repository. The confs
# init and the append of each repo conf (orig. 614-616, 623-624) are
# missing from this view.
611 def get_config(self
, local
=False):
613 Get configuration file that is sent to the builder.
617 # Add the distribution configuration.
618 confs
.append(self
.distro
.get_config())
620 # Then add all repositories for this build.
621 for repo
in self
.get_build_repos():
622 conf
= repo
.get_conf(local
=local
)
625 return "\n\n".join(confs
)
# Dependency-check bookkeeping: stores the verdict and when it was
# made. Orig. 630-634 are missing — they may contain more than blanks;
# do not assume this setter is complete.
627 def set_dependency_check_succeeded(self
, value
):
628 self
._set
_attribute
("dependency_check_succeeded", value
)
629 self
._set
_attribute
("dependency_check_at", datetime
.datetime
.utcnow())
635 dependency_check_succeeded
= property(
636 lambda s
: s
.data
.dependency_check_succeeded
,
637 set_dependency_check_succeeded
)
640 log
.info("Processing dependencies for %s..." % self
)
642 config
= pakfire
.config
.Config(files
=["general.conf"])
643 config
.parse(self
.get_config(local
=True))
645 # The filename of the source file.
646 filename
= os
.path
.join(PACKAGES_DIR
, self
.build
.pkg
.path
)
647 assert os
.path
.exists(filename
), filename
649 # Create a new pakfire instance with the configuration for
651 p
= pakfire
.PakfireServer(config
=config
, arch
=self
.arch
)
653 # Try to solve the build dependencies.
655 solver
= p
.resolvdep(filename
)
657 # Catch dependency errors and log the problem string.
658 except DependencyError
, e
:
659 self
.dependency_check_succeeded
= False
662 # The dependency check has succeeded
664 self
.dependency_check_succeeded
= True