]>
Commit | Line | Data |
---|---|---|
f6e6ff79 MT |
1 | #!/usr/bin/python |
2 | ||
3 | import datetime | |
4 | import hashlib | |
5 | import logging | |
6 | import os | |
7 | import re | |
8 | import shutil | |
9 | import uuid | |
10 | ||
11 | import pakfire | |
12 | import pakfire.config | |
13 | import pakfire.packages | |
14 | ||
15 | import base | |
16 | import builders | |
17 | import logs | |
18 | import packages | |
19 | import repository | |
20 | import updates | |
21 | import users | |
22 | ||
23 | from constants import * | |
24 | ||
def import_from_package(_pakfire, filename, distro=None, commit=None, type="release",
		arches=None, check_for_duplicates=True, owner=None):
	"""
	Import a source package file and create a build from it.

	filename is the path of the package file on disk. If no distro is
	given, it is taken from the commit (commit must not be None in that
	case). When check_for_duplicates is set and the distribution already
	carries a package with the same name/epoch/version/release, the
	import is skipped and None is returned. Otherwise the tuple
	(package, build) of the newly created objects is returned.
	"""
	if distro is None:
		# NOTE(review): assumes commit is not None here - a call with
		# neither distro nor commit raises AttributeError.
		distro = commit.source.distro

	assert distro

	# Open the package file to read some basic information.
	pkg = pakfire.packages.open(None, None, filename)

	if check_for_duplicates:
		if distro.has_package(pkg.name, pkg.epoch, pkg.version, pkg.release):
			logging.warning("Duplicate package detected: %s. Skipping." % pkg)
			return

	# Open the package and add it to the database.
	pkg = packages.Package.open(_pakfire, filename)
	logging.debug("Created new package: %s" % pkg)

	# Associate the package to the processed commit.
	if commit:
		pkg.commit = commit

	# Create a new build object from the package which
	# is always a release build.
	build = Build.create(_pakfire, pkg, type=type, owner=owner, distro=distro)
	logging.debug("Created new build job: %s" % build)

	# Create all automatic jobs.
	build.create_autojobs(arches=arches)

	return pkg, build
58 | ||
59 | ||
class Builds(base.Object):
	"""
	Interface to the collection of all builds in the database.

	The lookup helpers return Build objects (or lists thereof) and may
	pass pre-fetched database rows along to avoid duplicate queries.
	"""

	def get_by_id(self, id, data=None):
		"""
		Return the Build with the given database id.

		data may carry a pre-fetched database row to save a query later.
		"""
		return Build(self.pakfire, id, data=data)

	def get_by_uuid(self, uuid):
		"""
		Look up a build by its UUID. Returns None if nothing matches.
		"""
		build = self.db.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid)

		if build:
			return self.get_by_id(build.id)

	def get_all(self, limit=50):
		"""
		Return the most recently created builds, newest first.

		Passing a false value for limit returns all builds.
		"""
		query = "SELECT * FROM builds ORDER BY time_created DESC"

		if limit:
			query += " LIMIT %d" % limit

		# Hand the fetched row to the Build object so it does not need
		# to hit the database again.
		return [self.get_by_id(b.id, b) for b in self.db.query(query)]

	def get_by_user(self, user, type=None, public=None):
		"""
		Return all builds belonging to the given user, newest first.

		type may restrict the result to "scratch" builds (filtering for
		"release" builds is not implemented, yet). public filters on the
		public flag when explicitly True or False.
		"""
		args = []
		conditions = []

		if not type or type == "scratch":
			# On scratch builds the user id equals the owner id.
			conditions.append("(builds.type = 'scratch' AND owner_id = %s)")
			args.append(user.id)

		elif not type or type == "release":
			pass # TODO

		if public is True:
			conditions.append("public = 'Y'")
		elif public is False:
			conditions.append("public = 'N'")

		# Fixed: this used to read "SELECT builds.* AS id" which is
		# invalid SQL - a wildcard selection cannot be aliased.
		query = "SELECT builds.* FROM builds \
			JOIN packages ON builds.pkg_id = packages.id"

		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		query += " ORDER BY builds.time_created DESC"

		builds = []
		for row in self.db.query(query, *args):
			builds.append(Build(self.pakfire, row.id, row))

		return builds

	def get_by_name(self, name, type=None, public=None, user=None):
		"""
		Return all builds of the package with the given name, sorted by
		package version.

		A non-admin user additionally sees his own (e.g. private) builds.
		"""
		args = [name,]
		conditions = [
			"packages.name = %s",
		]

		if type:
			conditions.append("builds.type = %s")
			args.append(type)

		or_conditions = []
		if public is True:
			or_conditions.append("public = 'Y'")
		elif public is False:
			or_conditions.append("public = 'N'")

		if user and not user.is_admin():
			or_conditions.append("builds.owner_id = %s")
			args.append(user.id)

		query = "SELECT builds.id AS id FROM builds \
			JOIN packages ON builds.pkg_id = packages.id"

		if or_conditions:
			# Fixed: the OR group must be parenthesized. Without the
			# parentheses, AND binds tighter than OR and the owner
			# condition would match builds of *any* package name.
			conditions.append("(%s)" % " OR ".join(or_conditions))

		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		query += " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"

		return sorted([Build(self.pakfire, b.id) for b in self.db.query(query, *args)])

	def get_latest_by_name(self, name, type=None, public=None):
		"""
		Return the latest build of the package with the given name.

		If no type is given, release builds take precedence over
		scratch builds. Returns None when there is no matching build.
		"""
		if type is None:
			types = ("release", "scratch")
		else:
			types = (type,)

		query = "SELECT builds.id AS id FROM builds \
			JOIN packages ON builds.pkg_id = packages.id \
			WHERE builds.type = %s AND packages.name = %s"
		args = [name,]

		if public is True:
			query += " AND builds.public = 'Y'"
		elif public is False:
			query += " AND builds.public = 'N'"

		# Renamed the loop variable - it used to shadow the "type"
		# parameter.
		for build_type in types:
			res = self.db.query(query, build_type, *args)
			if not res:
				continue

			# Pick the build with the highest version.
			builds = [Build(self.pakfire, b.id) for b in res]
			builds.sort(reverse=True)

			return builds[0]

	def count(self):
		"""
		Return the total number of builds.

		The value is cached for up to 15 minutes.
		"""
		count = self.cache.get("builds_count")
		if count is None:
			builds = self.db.get("SELECT COUNT(*) AS count FROM builds")

			count = builds.count
			self.cache.set("builds_count", count, 3600 / 4)

		return count

	def needs_test(self, threshold, arch, limit=None, randomize=False):
		"""
		Return stable/testing release builds that need a test rebuild
		on the given architecture.

		A build qualifies when its last finished build job on that
		architecture ended before threshold and no newer job for that
		architecture exists.
		"""
		query = "SELECT id FROM builds \
			WHERE NOT EXISTS \
				(SELECT * FROM jobs WHERE \
					jobs.build_id = builds.id AND \
					jobs.arch_id = %s AND \
					(jobs.state != 'finished' OR \
					jobs.time_finished >= %s) \
				) \
			AND EXISTS \
				(SELECT * FROM jobs WHERE \
					jobs.build_id = builds.id AND \
					jobs.arch_id = %s AND \
					jobs.type = 'build' AND \
					jobs.state = 'finished' AND \
					jobs.time_finished < %s \
				) \
			AND builds.type = 'release' \
			AND (builds.state = 'stable' OR builds.state = 'testing')"
		args = [arch.id, threshold, arch.id, threshold]

		if randomize:
			query += " ORDER BY RAND()"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		return [Build(self.pakfire, b.id) for b in self.db.query(query, *args)]

	def get_obsolete(self, repo=None):
		"""
		Get all obsoleted builds.

		If repo is True: which are in any repository.
		If repo is some Repository object: which are in this repository.
		"""
		args = []

		if repo is None:
			query = "SELECT id FROM builds WHERE state = 'obsolete'"

		else:
			query = "SELECT build_id AS id FROM repositories_builds \
				JOIN builds ON builds.id = repositories_builds.build_id \
				WHERE builds.state = 'obsolete'"

			if repo and not repo is True:
				query += " AND repositories_builds.repo_id = %s"
				args.append(repo.id)

		builds = []
		for row in self.db.query(query, *args):
			builds.append(Build(self.pakfire, row.id))

		return builds

	def get_changelog(self, name, public=None, limit=5, offset=0):
		"""
		Return release builds of the given package for changelog display.

		The result is sorted newest-first (by package version).
		"""
		query = "SELECT builds.* FROM builds \
			JOIN packages ON builds.pkg_id = packages.id \
			WHERE \
				builds.type = %s \
			AND \
				packages.name = %s"
		args = ["release", name,]

		if public == True:
			query += " AND builds.public = %s"
			args.append("Y")
		elif public == False:
			query += " AND builds.public = %s"
			args.append("N")

		query += " ORDER BY builds.time_created DESC"

		if limit:
			# MySQL-style "LIMIT offset,count" pagination.
			if offset:
				query += " LIMIT %s,%s"
				args += [offset, limit]
			else:
				query += " LIMIT %s"
				args.append(limit)

		builds = []
		for row in self.db.query(query, *args):
			builds.append(Build(self.pakfire, row.id, row))

		builds.sort(reverse=True)

		return builds

	def get_comments(self, limit=10, offset=None, user=None):
		"""
		Return the latest build comments as log entries.

		Optionally filtered by the user who wrote them.
		"""
		query = "SELECT * FROM builds_comments \
			JOIN users ON builds_comments.user_id = users.id"
		args = []

		wheres = []
		if user:
			wheres.append("users.id = %s")
			args.append(user.id)

		if wheres:
			query += " WHERE %s" % " AND ".join(wheres)

		# Sort everything.
		query += " ORDER BY time_created DESC"

		# Limits.
		if limit:
			if offset:
				query += " LIMIT %s,%s"
				args.append(offset)
			else:
				query += " LIMIT %s"

			args.append(limit)

		comments = []
		for comment in self.db.query(query, *args):
			comments.append(logs.CommentLogEntry(self.pakfire, comment))

		return comments
306 | ||
f6e6ff79 MT |
307 | |
308 | class Build(base.Object): | |
734c61e0 | 309 | def __init__(self, pakfire, id, data=None): |
f6e6ff79 MT |
310 | base.Object.__init__(self, pakfire) |
311 | ||
312 | # ID of this build | |
313 | self.id = id | |
314 | ||
315 | # Cache data. | |
734c61e0 | 316 | self._data = data |
f6e6ff79 MT |
317 | self._jobs = None |
318 | self._jobs_test = None | |
319 | self._depends_on = None | |
320 | self._pkg = None | |
321 | self._credits = None | |
322 | self._owner = None | |
323 | self._update = None | |
324 | self._repo = None | |
325 | self._distro = None | |
326 | ||
327 | def __repr__(self): | |
328 | return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.pkg) | |
329 | ||
	def __cmp__(self, other):
		"""
		Order builds by their source package (i.e. by package version).

		Python 2 only - __cmp__ and cmp() do not exist in Python 3.
		"""
		assert self.pkg
		assert other.pkg

		return cmp(self.pkg, other.pkg)
335 | ||
336 | @property | |
337 | def cache_key(self): | |
338 | return "build_%s" % self.id | |
339 | ||
340 | def clear_cache(self): | |
341 | """ | |
342 | Clear the stored data from the cache. | |
343 | """ | |
344 | self.cache.delete(self.cache_key) | |
345 | ||
	@classmethod
	def create(cls, pakfire, pkg, type="release", owner=None, distro=None, public=True):
		"""
		Create a new build from the given source package.

		type must be one of "release", "scratch" or "test"; scratch
		builds require an owner. The source package file is moved into
		the build directory. Returns the new Build object.
		"""
		assert type in ("release", "scratch", "test")
		assert distro, "You need to specify the distribution of this build."

		# The public flag is stored as 'Y'/'N' in the database.
		if public:
			public = "Y"
		else:
			public = "N"

		# Check if scratch build has an owner.
		if type == "scratch" and not owner:
			raise Exception, "Scratch builds require an owner"

		# Set the default priority of this build.
		if type == "release":
			priority = 0

		elif type == "scratch":
			priority = 1

		elif type == "test":
			priority = -1

		id = pakfire.db.execute("""
			INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
			VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid.uuid4(), pkg.id,
			type, distro.id, public, priority)

		# Set the owner of this buildgroup.
		if owner:
			pakfire.db.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
				owner.id, id)

		build = cls(pakfire, id)

		# Log that the build has been created.
		build.log("created", user=owner)

		# Create directory where the files live.
		if not os.path.exists(build.path):
			os.makedirs(build.path)

		# Move package file to the directory of the build.
		source_path = os.path.join(build.path, "src")
		build.pkg.move(source_path)

		# Generate an update id.
		build.generate_update_id()

		# Obsolete all other builds with the same name to track updates.
		build.obsolete_others()

		# Search for possible bug IDs in the commit message.
		build.search_for_bugs()

		return build
403 | ||
	def delete(self):
		"""
		Deletes this build including all jobs, packages and the source
		package.

		The build is first removed from its repository (if any), then
		all jobs, the source package and every related record are
		deleted before the build row itself is removed.
		"""
		# If the build is in a repository, we need to remove it.
		if self.repo:
			self.repo.rem_build(self)

		for job in self.jobs + self.test_jobs:
			job.delete()

		if self.pkg:
			self.pkg.delete()

		# Delete everything related to this build.
		self.__delete_bugs()
		self.__delete_comments()
		self.__delete_history()
		self.__delete_watchers()

		# Delete the build itself.
		self.db.execute("DELETE FROM builds WHERE id = %s", self.id)
		self.clear_cache()
428 | ||
429 | def __delete_bugs(self): | |
430 | """ | |
431 | Delete all associated bugs. | |
432 | """ | |
433 | self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s", self.id) | |
434 | ||
435 | def __delete_comments(self): | |
436 | """ | |
437 | Delete all comments. | |
438 | """ | |
439 | self.db.execute("DELETE FROM builds_comments WHERE build_id = %s", self.id) | |
440 | ||
441 | def __delete_history(self): | |
442 | """ | |
443 | Delete the repository history. | |
444 | """ | |
445 | self.db.execute("DELETE FROM repositories_history WHERE build_id = %s", self.id) | |
446 | ||
447 | def __delete_watchers(self): | |
448 | """ | |
449 | Delete all watchers. | |
450 | """ | |
451 | self.db.execute("DELETE FROM builds_watchers WHERE build_id = %s", self.id) | |
452 | ||
	def reset(self):
		"""
		Resets the whole build so it can start again (as it has never
		been started).

		All jobs are reset, and comments, repository history and
		watchers are removed. Associated bugs are intentionally kept.
		"""
		for job in self.jobs:
			job.reset()

		#self.__delete_bugs()
		self.__delete_comments()
		self.__delete_history()
		self.__delete_watchers()

		# NOTE(review): "state" is a read-only property on this class -
		# this assignment likely raises AttributeError; update_state()
		# does not accept "building" either. Needs verification.
		self.state = "building"

		# XXX empty log
469 | ||
	@property
	def data(self):
		"""
		Lazy fetching of data for this object.

		Tries the cache first, then falls back to loading the whole
		builds row from the database (and stores it in the cache).
		"""
		if self._data is None:
			data = self.cache.get(self.cache_key)
			if not data:
				# Fetch the whole row in one call.
				data = self.db.get("SELECT * FROM builds WHERE id = %s", self.id)
				self.cache.set(self.cache_key, data)

			self._data = data
		# The row must exist - a missing id is a programming error.
		assert self._data

		return self._data
486 | ||
487 | @property | |
488 | def info(self): | |
489 | """ | |
490 | A set of information that is sent to the XMLRPC client. | |
491 | """ | |
492 | return { "uuid" : self.uuid } | |
493 | ||
494 | def log(self, action, user=None, bug_id=None): | |
495 | user_id = None | |
496 | if user: | |
497 | user_id = user.id | |
498 | ||
499 | self.db.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \ | |
500 | VALUES(%s, %s, %s, NOW(), %s)", self.id, action, user_id, bug_id) | |
501 | ||
502 | @property | |
503 | def uuid(self): | |
504 | """ | |
505 | The UUID of this build. | |
506 | """ | |
507 | return self.data.uuid | |
508 | ||
509 | @property | |
510 | def pkg(self): | |
511 | """ | |
512 | Get package that is to be built in the build. | |
513 | """ | |
514 | if self._pkg is None: | |
515 | self._pkg = packages.Package(self.pakfire, self.data.pkg_id) | |
516 | ||
517 | return self._pkg | |
518 | ||
519 | @property | |
520 | def name(self): | |
521 | return "%s-%s" % (self.pkg.name, self.pkg.friendly_version) | |
522 | ||
523 | @property | |
524 | def type(self): | |
525 | """ | |
526 | The type of this build. | |
527 | """ | |
528 | return self.data.type | |
529 | ||
530 | @property | |
531 | def owner_id(self): | |
532 | """ | |
533 | The ID of the owner of this build. | |
534 | """ | |
535 | return self.data.owner_id | |
536 | ||
537 | @property | |
538 | def owner(self): | |
539 | """ | |
540 | The owner of this build. | |
541 | """ | |
542 | if not self.owner_id: | |
543 | return | |
544 | ||
545 | if self._owner is None: | |
546 | self._owner = self.pakfire.users.get_by_id(self.owner_id) | |
547 | assert self._owner | |
548 | ||
549 | return self._owner | |
550 | ||
551 | @property | |
552 | def distro_id(self): | |
553 | return self.data.distro_id | |
554 | ||
555 | @property | |
556 | def distro(self): | |
557 | if self._distro is None: | |
558 | self._distro = self.pakfire.distros.get_by_id(self.distro_id) | |
559 | assert self._distro | |
560 | ||
561 | return self._distro | |
562 | ||
563 | @property | |
564 | def user(self): | |
565 | if self.type == "scratch": | |
566 | return self.owner | |
567 | ||
568 | def get_depends_on(self): | |
569 | if self.data.depends_on and self._depends_on is None: | |
570 | self._depends_on = Build(self.pakfire, self.data.depends_on) | |
571 | ||
572 | return self._depends_on | |
573 | ||
574 | def set_depends_on(self, build): | |
575 | self.db.execute("UPDATE builds SET depends_on = %s WHERE id = %s", | |
576 | build.id, self.id) | |
577 | self.clear_cache() | |
578 | ||
579 | # Update cache. | |
580 | self._depends_on = build | |
581 | self._data["depends_on"] = build.id | |
582 | ||
583 | depends_on = property(get_depends_on, set_depends_on) | |
584 | ||
585 | @property | |
586 | def created(self): | |
587 | return self.data.time_created | |
588 | ||
eedc6432 MT |
589 | @property |
590 | def date(self): | |
591 | return self.created.date() | |
592 | ||
f6e6ff79 MT |
593 | @property |
594 | def public(self): | |
595 | """ | |
596 | Is this build public? | |
597 | """ | |
598 | return self.data.public == "Y" | |
599 | ||
eedc6432 MT |
600 | @property |
601 | def size(self): | |
602 | """ | |
603 | Returns the size on disk of this build. | |
604 | """ | |
605 | s = 0 | |
606 | ||
607 | # Add the source package. | |
608 | if self.pkg: | |
609 | s += self.pkg.size | |
610 | ||
611 | # Add all jobs. | |
612 | s += sum((j.size for j in self.jobs)) | |
613 | ||
614 | return s | |
615 | ||
f6e6ff79 MT |
616 | #@property |
617 | #def state(self): | |
618 | # # Cache all states. | |
619 | # states = [j.state for j in self.jobs] | |
620 | # | |
621 | # target_state = "unknown" | |
622 | # | |
623 | # # If at least one job has failed, the whole build has failed. | |
624 | # if "failed" in states: | |
625 | # target_state = "failed" | |
626 | # | |
627 | # # It at least one of the jobs is still running, the whole | |
628 | # # build is in running state. | |
629 | # elif "running" in states: | |
630 | # target_state = "running" | |
631 | # | |
632 | # # If all jobs are in the finished state, we turn into finished | |
633 | # # state as well. | |
634 | # elif all([s == "finished" for s in states]): | |
635 | # target_state = "finished" | |
636 | # | |
637 | # return target_state | |
638 | ||
639 | def auto_update_state(self): | |
640 | """ | |
641 | Check if the state of this build can be updated and perform | |
642 | the change if possible. | |
643 | """ | |
644 | # Do not change the broken/obsolete state automatically. | |
645 | if self.state in ("broken", "obsolete"): | |
646 | return | |
647 | ||
648 | if self.repo and self.repo.type == "stable": | |
649 | self.update_state("stable") | |
650 | return | |
651 | ||
652 | # If any of the build jobs are finished, the build will be put in testing | |
653 | # state. | |
654 | for job in self.jobs: | |
655 | if job.state == "finished": | |
656 | self.update_state("testing") | |
657 | break | |
658 | ||
	def update_state(self, state, user=None, remove=False):
		"""
		Set the state of this build and perform all follow-up actions.

		state must be one of "stable", "testing", "obsolete" or
		"broken". If remove is set (or the build turns broken), the
		build leaves its repository. A release build entering the
		testing state is put into the first repository of its
		distribution.
		"""
		assert state in ("stable", "testing", "obsolete", "broken")

		self.db.execute("UPDATE builds SET state = %s WHERE id = %s", state, self.id)

		# Keep the cached row in sync.
		if self._data:
			self._data["state"] = state

		# In broken state, the removal from the repository is forced and
		# all jobs that are not finished yet will be aborted.
		if state == "broken":
			remove = True

			for job in self.jobs:
				if job.state in ("new", "pending", "running", "dependency_error"):
					job.state = "aborted"

		# If this build is in a repository, it will leave it.
		# (Note: this branch and the one below are alternatives - a
		# broken build never takes the "testing" branch.)
		if remove and self.repo:
			self.repo.rem_build(self)

		# If a release build is now in testing state, we put it into the
		# first repository of the distribution.
		elif self.type == "release" and state == "testing":
			# If the build is not in a repository, yet and if there is
			# a first repository, we put the build there.
			if not self.repo and self.distro.first_repo:
				self.distro.first_repo.add_build(self, user=user)
687 | ||
688 | @property | |
689 | def state(self): | |
690 | return self.data.state | |
691 | ||
9fa1787c MT |
692 | def is_broken(self): |
693 | return self.state == "broken" | |
694 | ||
f6e6ff79 MT |
695 | def obsolete_others(self): |
696 | if not self.type == "release": | |
697 | return | |
698 | ||
699 | for build in self.pakfire.builds.get_by_name(self.pkg.name, type="release"): | |
700 | # Don't modify ourself. | |
701 | if self.id == build.id: | |
702 | continue | |
703 | ||
704 | # Don't touch broken builds. | |
705 | if build.state in ("obsolete", "broken"): | |
706 | continue | |
707 | ||
708 | # Obsolete the build. | |
709 | build.update_state("obsolete") | |
710 | ||
711 | def set_severity(self, severity): | |
712 | self.db.execute("UPDATE builds SET severity = %s WHERE id = %s", state, self.id) | |
713 | ||
714 | if self._data: | |
715 | self._data["severity"] = severity | |
716 | self.clear_cache() | |
717 | ||
718 | def get_severity(self): | |
719 | return self.data.severity | |
720 | ||
721 | severity = property(get_severity, set_severity) | |
722 | ||
723 | @property | |
724 | def commit(self): | |
725 | if self.pkg and self.pkg.commit: | |
726 | return self.pkg.commit | |
727 | ||
728 | def update_message(self, msg): | |
729 | self.db.execute("UPDATE builds SET message = %s WHERE id = %s", msg, self.id) | |
730 | ||
731 | if self._data: | |
732 | self._data["message"] = msg | |
733 | self.clear_cache() | |
734 | ||
735 | def has_perm(self, user): | |
736 | """ | |
737 | Check, if the given user has the right to perform administrative | |
738 | operations on this build. | |
739 | """ | |
740 | if user is None: | |
741 | return False | |
742 | ||
743 | if user.is_admin(): | |
744 | return True | |
745 | ||
746 | # Check if the user is allowed to manage packages from the critical path. | |
747 | if self.critical_path and not user.has_perm("manage_critical_path"): | |
748 | return False | |
749 | ||
750 | # Search for maintainers... | |
751 | ||
752 | # Scratch builds. | |
753 | if self.type == "scratch": | |
754 | # The owner of a scratch build has the right to do anything with it. | |
755 | if self.owner_id == user.id: | |
756 | return True | |
757 | ||
758 | # Release builds. | |
759 | elif self.type == "release": | |
760 | # The maintainer also is allowed to manage the build. | |
761 | if self.pkg.maintainer == user: | |
762 | return True | |
763 | ||
764 | # Deny permission for all other cases. | |
765 | return False | |
766 | ||
767 | @property | |
768 | def message(self): | |
769 | message = "" | |
770 | ||
771 | if self.data.message: | |
772 | message = self.data.message | |
773 | ||
774 | elif self.commit: | |
775 | if self.commit.message: | |
776 | message = "\n".join((self.commit.subject, self.commit.message)) | |
777 | else: | |
778 | message = self.commit.subject | |
779 | ||
780 | prefix = "%s: " % self.pkg.name | |
781 | if message.startswith(prefix): | |
782 | message = message[len(prefix):] | |
783 | ||
784 | return message | |
785 | ||
786 | def get_priority(self): | |
787 | return self.data.priority | |
788 | ||
789 | def set_priority(self, priority): | |
790 | assert priority in (-2, -1, 0, 1, 2) | |
791 | ||
792 | self.db.execute("UPDATE builds SET priority = %s WHERE id = %s", priority, | |
793 | self.id) | |
794 | self.clear_cache() | |
795 | ||
796 | if self._data: | |
797 | self._data["priority"] = priority | |
798 | ||
799 | priority = property(get_priority, set_priority) | |
800 | ||
	@property
	def path(self):
		"""
		The directory (relative path) where the files of this build live.

		Scratch builds live below BUILD_SCRATCH_DIR keyed by UUID;
		release builds below BUILD_RELEASE_DIR keyed by package NEVR.
		"""
		path = []
		if self.type == "scratch":
			path.append(BUILD_SCRATCH_DIR)
			path.append(self.uuid)

		elif self.type == "release":
			path.append(BUILD_RELEASE_DIR)
			path.append("%s/%s-%s-%s" % \
				(self.pkg.name, self.pkg.epoch, self.pkg.version, self.pkg.release))

		else:
			raise Exception, "Unknown build type: %s" % self.type

		return os.path.join(*path)
817 | ||
818 | @property | |
819 | def source_filename(self): | |
820 | return os.path.basename(self.pkg.path) | |
821 | ||
822 | @property | |
823 | def download_prefix(self): | |
824 | return "/".join((self.pakfire.settings.get("download_baseurl"), "packages")) | |
825 | ||
826 | @property | |
827 | def source_download(self): | |
828 | return "/".join((self.download_prefix, self.pkg.path)) | |
829 | ||
830 | @property | |
831 | def source_hash_sha512(self): | |
832 | return self.pkg.hash_sha512 | |
833 | ||
834 | @property | |
835 | def link(self): | |
836 | # XXX maybe this should rather live in a uimodule. | |
837 | # zlib-1.2.3-2.ip3 [src, i686, blah...] | |
838 | s = """<a class="state_%s %s" href="/build/%s">%s</a>""" % \ | |
839 | (self.state, self.type, self.uuid, self.name) | |
840 | ||
841 | s_jobs = [] | |
842 | for job in self.jobs: | |
843 | s_jobs.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \ | |
844 | (job.state, job.type, job.uuid, job.arch.name)) | |
845 | ||
846 | if s_jobs: | |
847 | s += " [%s]" % ", ".join(s_jobs) | |
848 | ||
849 | return s | |
850 | ||
851 | @property | |
852 | def supported_arches(self): | |
853 | return self.pkg.supported_arches | |
854 | ||
855 | @property | |
856 | def critical_path(self): | |
857 | return self.pkg.critical_path | |
858 | ||
859 | def get_jobs(self, type=None): | |
860 | """ | |
861 | Returns a list of jobs of this build. | |
862 | """ | |
863 | return self.pakfire.jobs.get_by_build(self.id, self, type=type) | |
864 | ||
865 | @property | |
866 | def jobs(self): | |
867 | """ | |
868 | Get a list of all build jobs that are in this build. | |
869 | """ | |
870 | if self._jobs is None: | |
871 | self._jobs = self.get_jobs(type="build") | |
872 | ||
873 | return self._jobs | |
874 | ||
875 | @property | |
876 | def test_jobs(self): | |
877 | if self._jobs_test is None: | |
878 | self._jobs_test = self.get_jobs(type="test") | |
879 | ||
880 | return self._jobs_test | |
881 | ||
882 | @property | |
883 | def all_jobs_finished(self): | |
884 | ret = True | |
885 | ||
886 | for job in self.jobs: | |
887 | if not job.state == "finished": | |
888 | ret = False | |
889 | break | |
890 | ||
891 | return ret | |
892 | ||
893 | def create_autojobs(self, arches=None, type="build"): | |
894 | jobs = [] | |
895 | ||
896 | # Arches may be passed to this function. If not we use all arches | |
897 | # this package supports. | |
898 | if arches is None: | |
899 | arches = self.supported_arches | |
900 | ||
901 | # Create a new job for every given archirecture. | |
902 | for arch in self.pakfire.arches.expand(arches): | |
903 | # Don't create jobs for src. | |
904 | if arch.name == "src": | |
905 | continue | |
906 | ||
907 | job = self.add_job(arch, type=type) | |
908 | jobs.append(job) | |
909 | ||
910 | # Return all newly created jobs. | |
911 | return jobs | |
912 | ||
913 | def add_job(self, arch, type="build"): | |
914 | job = Job.create(self.pakfire, self, arch, type=type) | |
915 | ||
916 | # Add new job to cache. | |
917 | if self._jobs: | |
918 | self._jobs.append(job) | |
919 | ||
920 | return job | |
921 | ||
922 | ## Update stuff | |
923 | ||
	@property
	def update_id(self):
		"""
		The human-readable update ID of this (release) build, e.g.
		"DISTRO-2012-0001". Returns None for non-release builds.
		"""
		if not self.type == "release":
			return

		# Generate an update ID if none does exist, yet.
		self.generate_update_id()

		s = [
			"%s" % self.distro.name.replace(" ", "").upper(),
			"%04d" % (self.data.update_year or 0),
			"%04d" % (self.data.update_num or 0),
		]

		return "-".join(s)
939 | ||
	def generate_update_id(self):
		"""
		Assign the next free update number of the current year to this
		release build (no-op if one is already assigned).
		"""
		if not self.type == "release":
			return

		if self.data.update_num:
			return

		# NOTE(review): this picks the highest update_num of the year
		# across *all* builds - it does not filter by distribution or
		# build type; confirm that is intended.
		update = self.db.get("SELECT update_num AS num FROM builds \
			WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")

		if update:
			update_num = update.num + 1
		else:
			update_num = 1

		self.db.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
			WHERE id = %s", update_num, self.id)
957 | ||
958 | ## Comment stuff | |
959 | ||
	def get_comments(self, limit=10, offset=0):
		"""
		Return all comments on this build (oldest first) as log entries.

		NOTE(review): the limit and offset parameters are currently
		ignored - the query always returns all comments.
		"""
		query = "SELECT * FROM builds_comments \
			JOIN users ON builds_comments.user_id = users.id \
			WHERE build_id = %s ORDER BY time_created ASC"

		comments = []
		for comment in self.db.query(query, self.id):
			comment = logs.CommentLogEntry(self.pakfire, comment)
			comments.append(comment)

		return comments
971 | ||
	def add_comment(self, user, text, credit):
		"""
		Add a comment by the given user with the given credit (score
		delta), notify watchers and return the new comment's ID.
		"""
		# Add the new comment to the database.
		id = self.db.execute("INSERT INTO \
			builds_comments(build_id, user_id, text, credit, time_created) \
			VALUES(%s, %s, %s, %s, NOW())",
			self.id, user.id, text, credit)

		# Update the credit cache.
		if not self._credits is None:
			self._credits += credit

		# Send the new comment to all watchers and stuff.
		self.send_comment_message(id)

		# Return the ID of the newly created comment.
		return id
988 | ||
	@property
	def score(self):
		"""
		The total score of this build: the sum of all comment credits.

		Cached on the instance after the first access.
		"""
		# XXX UPDATE THIS
		if self._credits is None:
			# Get the sum of the credits from the database.
			query = self.db.get(
				"SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s",
				self.id
			)

			# SUM() is NULL when there are no comments - fall back to 0.
			self._credits = query.credits or 0

		return self._credits
1002 | ||
1003 | @property | |
1004 | def credits(self): | |
1005 | # XXX COMPAT | |
1006 | return self.score | |
1007 | ||
1008 | def get_commenters(self): | |
1009 | users = self.db.query("SELECT DISTINCT users.id AS id FROM builds_comments \ | |
1010 | JOIN users ON builds_comments.user_id = users.id \ | |
1011 | WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \ | |
1012 | AND NOT users.activated = 'Y' ORDER BY users.id", self.id) | |
1013 | ||
1014 | return [users.User(self.pakfire, u.id) for u in users] | |
1015 | ||
	def send_comment_message(self, comment_id):
		"""
		Send a notification about the given comment to all message
		recipients of this build (see message_recipients).
		"""
		comment = self.db.get("SELECT * FROM builds_comments WHERE id = %s",
			comment_id)

		assert comment
		# The comment must belong to this very build.
		assert comment.build_id == self.id

		# Get user who wrote the comment.
		user = self.pakfire.users.get_by_id(comment.user_id)

		# Placeholders for the (translatable) message subject.
		format = {
			"build_name" : self.name,
			"user_name" : user.realname,
		}

		# XXX create beautiful message

		self.pakfire.messages.send_to_all(self.message_recipients,
			N_("%(user_name)s commented on %(build_name)s"),
			comment.text, format)
1036 | ||
1037 | ## Logging stuff | |
1038 | ||
1039 | def get_log(self, comments=True, repo=True, limit=None): | |
1040 | entries = [] | |
1041 | ||
fd681905 MT |
1042 | # Created entry. |
1043 | created_entry = logs.CreatedLogEntry(self.pakfire, self) | |
1044 | entries.append(created_entry) | |
1045 | ||
f6e6ff79 MT |
1046 | if comments: |
1047 | entries += self.get_comments(limit=limit) | |
1048 | ||
1049 | if repo: | |
1050 | entries += self.get_repo_moves(limit=limit) | |
1051 | ||
1052 | # Sort all entries in chronological order. | |
1053 | entries.sort() | |
1054 | ||
1055 | if limit: | |
1056 | entries = entries[:limit] | |
1057 | ||
1058 | return entries | |
1059 | ||
1060 | ## Watchers stuff | |
1061 | ||
	def get_watchers(self):
		"""
		Return all users watching this build.

		Only activated, not-deleted accounts are considered.
		"""
		query = self.db.query("SELECT DISTINCT user_id AS id FROM builds_watchers \
			JOIN users ON builds_watchers.user_id = users.id \
			WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \
			AND users.activated = 'Y' ORDER BY users.id", self.id)

		return [users.User(self.pakfire, u.id) for u in query]
1069 | ||
1070 | def add_watcher(self, user): | |
1071 | # Don't add a user twice. | |
1072 | if user in self.get_watchers(): | |
1073 | return | |
1074 | ||
1075 | self.db.execute("INSERT INTO builds_watchers(build_id, user_id) \ | |
1076 | VALUES(%s, %s)", self.id, user.id) | |
1077 | ||
1078 | @property | |
1079 | def message_recipients(self): | |
1080 | ret = [] | |
1081 | ||
1082 | for watcher in self.get_watchers(): | |
1083 | ret.append("%s <%s>" % (watcher.realname, watcher.email)) | |
1084 | ||
1085 | return ret | |
1086 | ||
	@property
	def update(self):
		# The Update object this build is part of, or None.
		# Lazily resolved and cached in self._update.
		if self._update is None:
			update = self.db.get("SELECT update_id AS id FROM updates_builds \
				WHERE build_id = %s", self.id)

			if update:
				self._update = updates.Update(self.pakfire, update.id)

		return self._update

	@property
	def repo(self):
		# The repository this build currently lives in, or None when
		# the build is in no repository. Cached in self._repo.
		if self._repo is None:
			repo = self.db.get("SELECT repo_id AS id FROM repositories_builds \
				WHERE build_id = %s", self.id)

			if repo:
				self._repo = repository.Repository(self.pakfire, repo.id)

		return self._repo
1108 | ||
1109 | def get_repo_moves(self, limit=None): | |
1110 | query = "SELECT * FROM repositories_history \ | |
1111 | WHERE build_id = %s ORDER BY time ASC" | |
1112 | ||
1113 | actions = [] | |
1114 | for action in self.db.query(query, self.id): | |
1115 | action = logs.RepositoryLogEntry(self.pakfire, action) | |
1116 | actions.append(action) | |
1117 | ||
1118 | return actions | |
1119 | ||
1120 | @property | |
1121 | def is_loose(self): | |
1122 | if self.repo: | |
1123 | return False | |
1124 | ||
1125 | return True | |
1126 | ||
1127 | @property | |
1128 | def repo_time(self): | |
1129 | repo = self.db.get("SELECT time_added FROM repositories_builds \ | |
1130 | WHERE build_id = %s", self.id) | |
1131 | ||
1132 | if repo: | |
1133 | return repo.time_added | |
1134 | ||
1135 | def get_auto_move(self): | |
1136 | return self.data.auto_move == "Y" | |
1137 | ||
1138 | def set_auto_move(self, state): | |
1139 | if state: | |
1140 | state = "Y" | |
1141 | else: | |
1142 | state = "N" | |
1143 | ||
1144 | self.db.execute("UPDATE builds SET auto_move = %s WHERE id = %s", self.id) | |
1145 | if self._data: | |
1146 | self._data["auto_move"] = state | |
1147 | ||
1148 | auto_move = property(get_auto_move, set_auto_move) | |
1149 | ||
	@property
	def can_move_forward(self):
		# Whether this build is eligible to be moved into the next
		# repository of the chain. Checks are ordered from cheapest to
		# most expensive.
		if not self.repo:
			return False

		# If there is no next repository, we cannot move anything.
		next_repo = self.repo.next()

		if not next_repo:
			return False

		# If the needed amount of score is reached, we can move forward.
		if self.score >= next_repo.score_needed:
			return True

		# If the repository does not require a minimal time,
		# we can move forward immediately.
		if not self.repo.time_min:
			return True

		# Otherwise check how long the build has already been in the
		# current repository (computed by the database server).
		query = self.db.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
			WHERE build_id = %s", self.id)
		duration = query.duration

		if duration >= self.repo.time_min:
			return True

		return False
1178 | ||
1179 | ## Bugs | |
1180 | ||
1181 | def get_bug_ids(self): | |
1182 | query = self.db.query("SELECT bug_id FROM builds_bugs \ | |
1183 | WHERE build_id = %s", self.id) | |
1184 | ||
1185 | return [b.bug_id for b in query] | |
1186 | ||
1187 | def add_bug(self, bug_id, user=None, log=True): | |
1188 | # Check if this bug is already in the list of bugs. | |
1189 | if bug_id in self.get_bug_ids(): | |
1190 | return | |
1191 | ||
1192 | self.db.execute("INSERT INTO builds_bugs(build_id, bug_id) \ | |
1193 | VALUES(%s, %s)", self.id, bug_id) | |
1194 | ||
1195 | # Log the event. | |
1196 | if log: | |
1197 | self.log("bug_added", user=user, bug_id=bug_id) | |
1198 | ||
1199 | def rem_bug(self, bug_id, user=None, log=True): | |
1200 | self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \ | |
1201 | bug_id = %s", self.id, bug_id) | |
1202 | ||
1203 | # Log the event. | |
1204 | if log: | |
1205 | self.log("bug_removed", user=user, bug_id=bug_id) | |
1206 | ||
	def search_for_bugs(self):
		"""
		Scan the subject and message of the commit this build came from
		for bug references (e.g. "bug 12345" or "#12345") and link each
		bug that actually exists in Bugzilla to this build.
		"""
		if not self.commit:
			return

		# Matches "bug 12345", "bug12345" and "#12345".
		pattern = re.compile(r"(bug\s?|#)(\d+)")

		for txt in (self.commit.subject, self.commit.message):
			for bug in re.finditer(pattern, txt):
				try:
					bugid = int(bug.group(2))
				except ValueError:
					continue

				# Check if a bug with the given ID exists in BZ.
				bug = self.pakfire.bugzilla.get_bug(bugid)
				if not bug:
					continue

				self.add_bug(bugid)
1226 | ||
1227 | def get_bugs(self): | |
1228 | bugs = [] | |
1229 | for bug_id in self.get_bug_ids(): | |
1230 | bug = self.pakfire.bugzilla.get_bug(bug_id) | |
1231 | if not bug: | |
1232 | continue | |
1233 | ||
1234 | bugs.append(bug) | |
1235 | ||
1236 | return bugs | |
1237 | ||
	def _update_bugs_helper(self, repo):
		"""
		This function takes a new status and generates messages that
		are appended to all bugs.
		"""
		try:
			kwargs = BUG_MESSAGES[repo.type].copy()
		except KeyError:
			# No message template exists for this repository type.
			return

		baseurl = self.pakfire.settings.get("baseurl", "")
		# Placeholders for the comment template.
		args = {
			"build_url" : "%s/build/%s" % (baseurl, self.uuid),
			"distro_name" : self.distro.name,
			"package_name" : self.name,
			"repo_name" : repo.name,
		}
		kwargs["comment"] = kwargs["comment"] % args

		self.update_bugs(**kwargs)

	def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
		# Queue a single bug update in builds_bugs_updates -- presumably
		# processed asynchronously by a separate worker; confirm.
		self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
			VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment)

	def update_bugs(self, status, resolution=None, comment=None):
		# Update all bugs linked to this build.
		for bug_id in self.get_bug_ids():
			self._update_bug(bug_id, status=status, resolution=resolution, comment=comment)
1267 | ||
1268 | ||
class Jobs(base.Object):
	"""
	Collection interface for all build jobs.
	"""

	def get_by_id(self, id, data=None):
		"""
		Return the Job with the given ID. "data" may carry an already
		fetched database row to avoid a second query.
		"""
		return Job(self.pakfire, id, data)

	def get_by_uuid(self, uuid):
		"""
		Return the job with the given UUID, or None.
		"""
		job = self.db.get("SELECT id FROM jobs WHERE uuid = %s", uuid)

		if job:
			return self.get_by_id(job.id)

	def get_by_build(self, build_id, build=None, type=None):
		"""
		Get all jobs in the specifies build.
		"""
		query = "SELECT * FROM jobs WHERE build_id = %s"
		args = [build_id,]

		if type:
			query += " AND type = %s"
			args.append(type)

		# Get IDs of all builds in this group.
		jobs = []
		for job in self.db.query(query, *args):
			job = Job(self.pakfire, job.id, job)

			# If the Build object was set, we set it so it won't be retrieved
			# from the database again.
			if build:
				job._build = build

			jobs.append(job)

		# Return sorted list of jobs.
		return sorted(jobs)

	def get_active(self, host_id=None, builder=None, states=None):
		"""
		Return all currently active jobs, most-active state first,
		optionally restricted to one builder (object or ID).
		"""
		if builder:
			host_id = builder.id

		if states is None:
			states = ["dispatching", "running", "uploading"]

		query = "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states))
		# Copy so appending below never mutates the caller's list.
		args = states[:]

		if host_id:
			# BUGFIX/hardening: bind host_id as a query parameter instead
			# of interpolating it into the SQL string.
			query += " AND builder_id = %s"
			args.append(host_id)

		query += " ORDER BY \
			CASE \
				WHEN jobs.state = 'running' THEN 0 \
				WHEN jobs.state = 'uploading' THEN 1 \
				WHEN jobs.state = 'dispatching' THEN 2 \
				WHEN jobs.state = 'pending' THEN 3 \
				WHEN jobs.state = 'new' THEN 4 \
			END, time_started ASC"

		return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]

	def get_next_iter(self, *args, **kwargs):
		"""
		Like get_next(), but returns an iterator over the jobs.
		"""
		return iter(self.get_next(*args, **kwargs))

	def get_next(self, arches=None, builder=None, limit=None, offset=None, type=None,
			state=None, states=None, max_tries=None):
		"""
		Return the ordered queue of jobs to dispatch next.

		Pending jobs come before new ones, build jobs before test jobs;
		within that, builds are sorted by priority and age. When a
		builder is given, only jobs matching its architectures and its
		supported build types are returned.
		"""
		if state is None and states is None:
			states = ["pending", "new"]

		if builder and arches is None:
			arches = builder.get_arches()

		query = "SELECT jobs.* FROM jobs \
			JOIN builds ON jobs.build_id = builds.id \
			WHERE \
				(start_not_before IS NULL OR start_not_before <= NOW())"
		args = []

		if arches:
			query += " AND jobs.arch_id IN (%s)" % ", ".join(["%s"] * len(arches))
			args.extend([a.id for a in arches])

		if builder:
			#query += " AND (jobs.builder_id = %s OR jobs.builder_id IS NULL)"
			#args.append(builder.id)

			# Check out which types of builds this builder builds.
			build_types = []
			for build_type in builder.build_types:
				if build_type == "release":
					build_types.append("(builds.type = 'release' AND jobs.type = 'build')")
				elif build_type == "scratch":
					build_types.append("(builds.type = 'scratch' AND jobs.type = 'build')")
				elif build_type == "test":
					build_types.append("jobs.type = 'test'")

			if build_types:
				query += " AND (%s)" % " OR ".join(build_types)

		if max_tries:
			# NOTE(review): this filters on the jobs.max_tries column, not
			# on the number of tries used so far -- confirm this is intended.
			query += " AND jobs.max_tries <= %s"
			args.append(max_tries)

		if state:
			query += " AND jobs.state = %s"
			args.append(state)

		if states:
			query += " AND jobs.state IN (%s)" % ", ".join(["%s"] * len(states))
			args.extend(states)

		if type:
			query += " AND jobs.type = %s"
			args.append(type)

		# Order builds.
		# Release builds and scratch builds are more important than test builds.
		# Builds are sorted by priority and older builds are preferred.
		query += " ORDER BY \
			CASE \
				WHEN jobs.state = 'pending' THEN 0 \
				WHEN jobs.state = 'new' THEN 1 \
			END, \
			CASE \
				WHEN jobs.type = 'build' THEN 0 \
				WHEN jobs.type = 'test' THEN 1 \
			END, \
			builds.priority DESC, jobs.time_created ASC"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		jobs = []
		for row in self.db.query(query, *args):
			job = self.pakfire.jobs.get_by_id(row.id, row)
			jobs.append(job)

		return jobs

	def get_latest(self, arch=None, builder=None, limit=None, age=None, date=None):
		"""
		Return finished/failed/aborted jobs, newest first.

		Optional filters: architecture, builder, an exact date string
		("YYYY-MM-DD", silently ignored when malformed) or a maximum
		age given as a trusted SQL interval string.
		"""
		query = "SELECT * FROM jobs"
		args = []

		where = ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]

		if arch:
			where.append("arch_id = %s")
			args.append(arch.id)

		if builder:
			where.append("builder_id = %s")
			args.append(builder.id)

		if date:
			try:
				year, month, day = date.split("-", 2)
				date = datetime.date(int(year), int(month), int(day))
			except ValueError:
				# Malformed dates are silently ignored.
				pass
			else:
				where.append("(DATE(time_created) = %s OR \
					DATE(time_started) = %s OR DATE(time_finished) = %s)")
				args += (date, date, date)

		if age:
			# "age" is a trusted interval string (e.g. "24 HOUR") coming
			# from the application, not user input.
			where.append("time_finished >= DATE_SUB(NOW(), INTERVAL %s)" % age)

		if where:
			query += " WHERE %s" % " AND ".join(where)

		query += " ORDER BY time_finished DESC"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]

	def get_average_build_time(self):
		"""
		Returns the average build time of all finished builds from the
		last 3 months.
		"""
		cache_key = "jobs_avg_build_time"

		build_time = self.cache.get(cache_key)
		if not build_time:
			result = self.db.get("SELECT AVG(time_finished - time_started) as average \
				FROM jobs WHERE type = 'build' AND state = 'finished' AND \
				time_finished >= DATE_SUB(NOW(), INTERVAL 3 MONTH)")

			build_time = result.average or 0
			# Cache the result for one hour.
			self.cache.set(cache_key, build_time, 3600)

		return build_time

	def count(self, *states):
		"""
		Count jobs, optionally restricted to the given states.
		The result is cached for one minute.
		"""
		states = sorted(states)

		cache_key = "jobs_count_%s" % ("-".join(states) or "all")

		count = self.cache.get(cache_key)
		if count is None:
			query = "SELECT COUNT(*) AS count FROM jobs"
			args = []

			if states:
				query += " WHERE %s" % " OR ".join("state = %s" for s in states)
				args += states

			jobs = self.db.get(query, *args)

			count = jobs.count
			self.cache.set(cache_key, count, 60)

		return count
1487 | ||
1488 | ||
1489 | class Job(base.Object): | |
	def __init__(self, pakfire, id, data=None):
		"""
		A single build job (one architecture of one build).

		pakfire -- the backend application object.
		id      -- database ID of this job.
		data    -- optional, already fetched database row for this job.
		"""
		base.Object.__init__(self, pakfire)

		# The ID of this Job object.
		self.id = id

		# Cache the data of this object.
		self._data = data
		# Lazily resolved associated objects.
		self._build = None
		self._builder = None
		self._packages = None
		self._logfiles = None

	def __str__(self):
		return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.name)

	def __cmp__(self, other):
		# Sort order (Python 2 comparison protocol): build jobs before
		# test jobs; within the same build by architecture; otherwise by
		# package, then by creation time.
		if self.type == "build" and other.type == "test":
			return -1
		elif self.type == "test" and other.type == "build":
			return 1

		if self.build_id == other.build_id:
			return cmp(self.arch, other.arch)

		ret = cmp(self.pkg, other.pkg)

		if not ret:
			ret = cmp(self.time_created, other.time_created)

		return ret
1521 | ||
	@property
	def distro(self):
		# The distribution of the build this job belongs to.
		assert self.build.distro
		return self.build.distro

	@property
	def cache_key(self):
		# Cache key under which this job's row is stored.
		return "job_%s" % self.id

	def clear_cache(self):
		"""
		Clear the stored data from the cache.
		"""
		self.cache.delete(self.cache_key)
1536 | ||
	@classmethod
	def create(cls, pakfire, build, arch, type="build"):
		"""
		Create a new job for the given build and architecture, log its
		creation and return the Job object.
		"""
		id = pakfire.db.execute("INSERT INTO jobs(uuid, type, build_id, arch_id, time_created) \
			VALUES(%s, %s, %s, %s, NOW())", "%s" % uuid.uuid4(), type, build.id, arch.id)

		job = Job(pakfire, id)
		job.log("created")

		# Set cache for Build object.
		job._build = build

		# Jobs are by default in state "new" and wait for being checked
		# for dependencies. Packages that do have no build dependencies
		# can directly be forwarded to "pending" state.
		if not job.pkg.requires:
			job.state = "pending"

		return job
1555 | ||
	def delete(self):
		"""
		Completely remove this job from the database, including its
		buildroots, history, uploaded packages and log files.
		"""
		self.__delete_buildroots()
		self.__delete_history()
		self.__delete_packages()
		self.__delete_logfiles()

		# Delete the job itself.
		self.db.execute("DELETE FROM jobs WHERE id = %s", self.id)
		self.clear_cache()

	def __delete_buildroots(self):
		"""
		Removes all buildroots.
		"""
		self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)

	def __delete_history(self):
		"""
		Removes all references in the history to this build job.
		"""
		self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id)

	def __delete_packages(self):
		"""
		Deletes all uploaded files from the job.
		"""
		for pkg in self.packages:
			pkg.delete()

		self.db.execute("DELETE FROM jobs_packages WHERE job_id = %s", self.id)

	def __delete_logfiles(self):
		# Log files are not removed here; their paths are queued and
		# deleted later by a separate cleanup process.
		for logfile in self.logfiles:
			self.db.execute("INSERT INTO queue_delete(path) VALUES(%s)", logfile.path)

	def reset(self, user=None):
		"""
		Reset this job into the "new" state, discarding all previous
		results (buildroots, packages, history, log files), and log
		the event.
		"""
		self.__delete_buildroots()
		self.__delete_packages()
		self.__delete_history()
		self.__delete_logfiles()

		self.state = "new"
		self.log("reset", user=user)
1599 | ||
	@property
	def data(self):
		# The raw database row of this job; loaded lazily on first
		# access and cached in self._data.
		if self._data is None:
			self._data = self.db.get("SELECT * FROM jobs WHERE id = %s", self.id)
			assert self._data

		return self._data
1607 | ||
1608 | ## Logging stuff | |
1609 | ||
	def log(self, action, user=None, state=None, builder=None, test_job=None):
		"""
		Append an entry to this job's history.

		action   -- short event identifier (e.g. "created", "reset").
		user     -- optional User that triggered the event.
		state    -- optional job state associated with the event.
		builder  -- optional Builder involved in the event.
		test_job -- optional test Job spawned by the event.
		"""
		user_id = None
		if user:
			user_id = user.id

		builder_id = None
		if builder:
			builder_id = builder.id

		test_job_id = None
		if test_job:
			test_job_id = test_job.id

		self.db.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \
			time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)",
			self.id, action, state, user_id, builder_id, test_job_id)

	def get_log(self, limit=None, offset=None, user=None):
		"""
		Return this job's history as JobLogEntry objects, newest first.
		Supports limit/offset pagination and filtering by user.
		"""
		query = "SELECT * FROM jobs_history"

		conditions = ["job_id = %s",]
		args = [self.id,]

		if user:
			conditions.append("user_id = %s")
			args.append(user.id)

		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		query += " ORDER BY time DESC"

		# "LIMIT offset,count" (MySQL syntax); an offset without a limit
		# is ignored.
		if limit:
			if offset:
				query += " LIMIT %s,%s"
				args += [offset, limit,]
			else:
				query += " LIMIT %s"
				args += [limit,]

		entries = []
		for entry in self.db.query(query, *args):
			entry = logs.JobLogEntry(self.pakfire, entry)
			entries.append(entry)

		return entries
1656 | ||
	@property
	def uuid(self):
		# Globally unique identifier of this job.
		return self.data.uuid

	@property
	def type(self):
		# Job type: "build" or "test".
		return self.data.type

	@property
	def build_id(self):
		# Database ID of the build this job belongs to.
		return self.data.build_id

	@property
	def build(self):
		# The Build object of this job; lazily loaded and cached.
		if self._build is None:
			self._build = self.pakfire.builds.get_by_id(self.build_id)
			assert self._build

		return self._build

	@property
	def related_jobs(self):
		# All other jobs that belong to the same build.
		ret = []

		for job in self.build.jobs:
			if job == self:
				continue

			ret.append(job)

		return ret

	@property
	def pkg(self):
		# The source package of the associated build.
		return self.build.pkg

	@property
	def name(self):
		# Human-readable name: "<package>-<version>.<arch>".
		return "%s-%s.%s" % (self.pkg.name, self.pkg.friendly_version, self.arch.name)

	@property
	def size(self):
		# Total size of all packages this job has produced.
		return sum((p.size for p in self.packages))
1700 | ||
f6e6ff79 MT |
	def get_state(self):
		# Current state string of this job (e.g. "new", "pending",
		# "running", "finished", "failed").
		return self.data.state

	def set_state(self, state, user=None, log=True):
		"""
		Change the state of this job.

		Side effects: the state change is logged (unless log is False or
		the new state is "new"), the status message is cleared, start and
		finish timestamps are maintained, notification messages are sent
		on "finished"/"failed", packages are dropped on "failed", and the
		parent build's state is re-evaluated for build-type jobs.
		"""
		# Nothing to do if the state remains.
		if not self.state == state:
			self.db.execute("UPDATE jobs SET state = %s WHERE id = %s", state, self.id)
			self.clear_cache()

			# Log the event.
			if log and not state == "new":
				self.log("state_change", state=state, user=user)

			# Update cache.
			if self._data:
				self._data["state"] = state

		# Always clear the message when the status is changed.
		self.update_message(None)

		# Update some more informations.
		if state == "dispatching":
			# Set start time.
			self.db.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
				WHERE id = %s", self.id)

		elif state == "pending":
			# Count this as a new try and clear the timestamps.
			self.db.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
				time_finished = NULL WHERE id = %s", self.id)

		elif state in ("aborted", "dependency_error", "finished", "failed"):
			# Set finish time and reset builder..
			self.db.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self.id)

			# Send messages to the user.
			if state == "finished":
				self.send_finished_message()

			elif state == "failed":
				# Remove all package files if a job is set to failed state.
				self.__delete_packages()

				self.send_failed_message()

		# Automatically update the state of the build (not on test builds).
		if self.type == "build":
			self.build.auto_update_state()

	state = property(get_state, set_state)
1750 | ||
	@property
	def message(self):
		# Current status message of this job (may be None).
		return self.data.message

	def update_message(self, msg):
		"""
		Set (or clear, when msg is None) the status message of this job
		and keep the cached row in sync.
		"""
		self.db.execute("UPDATE jobs SET message = %s WHERE id = %s",
			msg, self.id)
		self.clear_cache()

		if self._data:
			self._data["message"] = msg

	@property
	def builder_id(self):
		# ID of the builder this job is assigned to (None if unassigned).
		return self.data.builder_id

	def get_builder(self):
		# The Builder object this job is assigned to, or None when it is
		# not assigned yet. Cached in self._builder.
		if not self.builder_id:
			return

		if self._builder is None:
			self._builder = builders.Builder(self.pakfire, self.builder_id)
			assert self._builder

		return self._builder

	def set_builder(self, builder, user=None):
		"""
		Assign this job to the given builder. The event is logged when
		a user is given.
		"""
		self.db.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
			builder.id, self.id)

		# Update cache.
		if self._data:
			self._data["builder_id"] = builder.id
		self.clear_cache()

		self._builder = builder

		# Log the event.
		if user:
			self.log("builder_assigned", builder=builder, user=user)

	builder = property(get_builder, set_builder)
1793 | ||
	@property
	def arch_id(self):
		# ID of the architecture this job builds for.
		return self.data.arch_id

	@property
	def arch(self):
		# The architecture object of this job.
		return self.pakfire.arches.get_by_id(self.arch_id)

	@property
	def duration(self):
		# Runtime of this job in seconds: 0 when not started yet; the
		# elapsed wall-clock time (UTC) while the job is still running.
		if not self.time_started:
			return 0

		if self.time_finished:
			delta = self.time_finished - self.time_started
		else:
			# Job is still running.
			delta = datetime.datetime.utcnow() - self.time_started

		return delta.total_seconds()

	@property
	def time_created(self):
		# Timestamp when the job was created.
		return self.data.time_created

	@property
	def time_started(self):
		# Timestamp when the job was (last) started, or None.
		return self.data.time_started

	@property
	def time_finished(self):
		# Timestamp when the job finished, or None.
		return self.data.time_finished

	@property
	def tries(self):
		# How often this job has been dispatched so far.
		return self.data.tries
1829 | ||
	@property
	def packages(self):
		# All binary packages this job has produced, ordered by name.
		# Lazily loaded and cached in self._packages.
		if self._packages is None:
			self._packages = []

			query = "SELECT pkg_id AS id FROM jobs_packages \
				JOIN packages ON packages.id = jobs_packages.pkg_id \
				WHERE jobs_packages.job_id = %s ORDER BY packages.name"

			for pkg in self.db.query(query, self.id):
				pkg = packages.Package(self.pakfire, pkg.id)
				# Link the package back to this job.
				pkg._job = self

				self._packages.append(pkg)

		return self._packages

	def get_pkg_by_uuid(self, uuid):
		"""
		Return the package with the given UUID that belongs to this
		job, or None if there is no such package.
		"""
		pkg = self.db.get("SELECT packages.id FROM packages \
			JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \
			WHERE jobs_packages.job_id = %s AND packages.uuid = %s",
			self.id, uuid)

		if not pkg:
			return

		pkg = packages.Package(self.pakfire, pkg.id)
		pkg._job = self

		return pkg
1860 | ||
	@property
	def logfiles(self):
		# All log files attached to this job; lazily loaded and cached.
		if self._logfiles is None:
			self._logfiles = []

			for log in self.db.query("SELECT id FROM logfiles WHERE job_id = %s", self.id):
				log = logs.LogFile(self.pakfire, log.id)
				# Link the log file back to this job.
				log._job = self

				self._logfiles.append(log)

		return self._logfiles
1873 | ||
1874 | def add_file(self, filename): | |
1875 | """ | |
1876 | Add the specified file to this job. | |
1877 | ||
1878 | The file is copied to the right directory by this function. | |
1879 | """ | |
1880 | assert os.path.exists(filename) | |
1881 | ||
1882 | if filename.endswith(".log"): | |
1883 | self._add_file_log(filename) | |
1884 | ||
1885 | elif filename.endswith(".%s" % PACKAGE_EXTENSION): | |
1886 | # It is not allowed to upload packages on test builds. | |
1887 | if self.type == "test": | |
1888 | return | |
1889 | ||
1890 | self._add_file_package(filename) | |
1891 | ||
1892 | def _add_file_log(self, filename): | |
1893 | """ | |
1894 | Attach a log file to this job. | |
1895 | """ | |
1896 | target_dirname = os.path.join(self.build.path, "logs") | |
1897 | ||
1898 | if self.type == "test": | |
1899 | i = 1 | |
1900 | while True: | |
1901 | target_filename = os.path.join(target_dirname, | |
1902 | "test.%s.%s.%s.log" % (self.arch.name, i, self.tries)) | |
1903 | ||
1904 | if os.path.exists(target_filename): | |
1905 | i += 1 | |
1906 | else: | |
1907 | break | |
1908 | else: | |
1909 | target_filename = os.path.join(target_dirname, | |
1910 | "build.%s.%s.log" % (self.arch.name, self.tries)) | |
1911 | ||
1912 | # Make sure the target directory exists. | |
1913 | if not os.path.exists(target_dirname): | |
1914 | os.makedirs(target_dirname) | |
1915 | ||
1916 | # Calculate a SHA512 hash from that file. | |
1917 | f = open(filename, "rb") | |
1918 | h = hashlib.sha512() | |
1919 | while True: | |
1920 | buf = f.read(BUFFER_SIZE) | |
1921 | if not buf: | |
1922 | break | |
1923 | ||
1924 | h.update(buf) | |
1925 | f.close() | |
1926 | ||
1927 | # Copy the file to the final location. | |
1928 | shutil.copy2(filename, target_filename) | |
1929 | ||
1930 | # Create an entry in the database. | |
1931 | self.db.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \ | |
1932 | VALUES(%s, %s, %s, %s)", self.id, os.path.relpath(target_filename, PACKAGES_DIR), | |
1933 | os.path.getsize(target_filename), h.hexdigest()) | |
1934 | ||
	def _add_file_package(self, filename):
		"""
		Attach an uploaded package file to this job.
		"""
		# Open package (creates entry in the database).
		pkg = packages.Package.open(self.pakfire, filename)

		# Move package to the build directory.
		pkg.move(os.path.join(self.build.path, self.arch.name))

		# Attach the package to this job.
		self.db.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)",
			self.id, pkg.id)
1945 | ||
	def get_aborted_state(self):
		# The state this job had before it was aborted.
		return self.data.aborted_state

	def set_aborted_state(self, state):
		# Persist the pre-abort state and keep the cached row in sync.
		self.db.execute("UPDATE jobs SET aborted_state = %s WHERE id = %s",
			state, self.id)
		self.clear_cache()

		if self._data:
			self._data["aborted_state"] = state

	aborted_state = property(get_aborted_state, set_aborted_state)
1958 | ||
1959 | @property | |
1960 | def message_recipients(self): | |
1961 | l = [] | |
1962 | ||
1963 | # Add all people watching the build. | |
1964 | l += self.build.message_recipients | |
1965 | ||
1966 | # Add the package maintainer on release builds. | |
1967 | if self.build.type == "release": | |
1968 | maint = self.pkg.maintainer | |
1969 | ||
1970 | if isinstance(maint, users.User): | |
1971 | l.append("%s <%s>" % (maint.realname, maint.email)) | |
1972 | elif maint: | |
1973 | l.append(maint) | |
1974 | ||
1975 | # XXX add committer and commit author. | |
1976 | ||
1977 | # Add the owner of the scratch build on scratch builds. | |
1978 | elif self.build.type == "scratch" and self.build.user: | |
1979 | l.append("%s <%s>" % \ | |
1980 | (self.build.user.realname, self.build.user.email)) | |
1981 | ||
1982 | return set(l) | |
1983 | ||
1984 | def save_buildroot(self, pkgs): | |
1985 | rows = [] | |
1986 | ||
1987 | for pkg_name, pkg_uuid in pkgs: | |
1988 | rows.append((self.id, self.tries, pkg_uuid, pkg_name)) | |
1989 | ||
1990 | # Cleanup old stuff first (for rebuilding packages). | |
1991 | self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s", | |
1992 | self.id, self.tries) | |
1993 | ||
1994 | self.db.executemany("INSERT INTO \ | |
1995 | jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \ | |
1996 | VALUES(%s, %s, %s, %s)", rows) | |
1997 | ||
1998 | def has_buildroot(self, tries=None): | |
1999 | if tries is None: | |
2000 | tries = self.tries | |
2001 | ||
2002 | res = self.db.get("SELECT COUNT(*) AS num FROM jobs_buildroots \ | |
2003 | WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \ | |
2004 | ORDER BY pkg_name", self.id, tries) | |
2005 | ||
2006 | if res: | |
2007 | return res.num | |
2008 | ||
2009 | return 0 | |
2010 | ||
2011 | def get_buildroot(self, tries=None): | |
2012 | if tries is None: | |
2013 | tries = self.tries | |
2014 | ||
2015 | rows = self.db.query("SELECT * FROM jobs_buildroots \ | |
2016 | WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \ | |
2017 | ORDER BY pkg_name", self.id, tries) | |
2018 | ||
2019 | pkgs = [] | |
2020 | for row in rows: | |
2021 | # Search for this package in the packages table. | |
2022 | pkg = self.pakfire.packages.get_by_uuid(row.pkg_uuid) | |
2023 | pkgs.append((row.pkg_name, row.pkg_uuid, pkg)) | |
2024 | ||
2025 | return pkgs | |
2026 | ||
2027 | def send_finished_message(self): | |
2028 | # Send no finished mails for test jobs. | |
2029 | if self.type == "test": | |
2030 | return | |
2031 | ||
2032 | logging.debug("Sending finished message for job %s to %s" % \ | |
2033 | (self.name, ", ".join(self.message_recipients))) | |
2034 | ||
2035 | info = { | |
2036 | "build_name" : self.name, | |
2037 | "build_host" : self.builder.name, | |
2038 | "build_uuid" : self.uuid, | |
2039 | } | |
2040 | ||
2041 | self.pakfire.messages.send_to_all(self.message_recipients, | |
2042 | MSG_BUILD_FINISHED_SUBJECT, MSG_BUILD_FINISHED, info) | |
2043 | ||
2044 | def send_failed_message(self): | |
2045 | logging.debug("Sending failed message for job %s to %s" % \ | |
2046 | (self.name, ", ".join(self.message_recipients))) | |
2047 | ||
2048 | build_host = "--" | |
2049 | if self.builder: | |
2050 | build_host = self.builder.name | |
2051 | ||
2052 | info = { | |
2053 | "build_name" : self.name, | |
2054 | "build_host" : build_host, | |
2055 | "build_uuid" : self.uuid, | |
2056 | } | |
2057 | ||
2058 | self.pakfire.messages.send_to_all(self.message_recipients, | |
2059 | MSG_BUILD_FAILED_SUBJECT, MSG_BUILD_FAILED, info) | |
2060 | ||
2061 | def set_start_time(self, start_time): | |
2062 | if start_time is None: | |
2063 | return | |
2064 | ||
2065 | self.db.execute("UPDATE jobs SET start_not_before = NOW() + %s \ | |
2066 | WHERE id = %s LIMIT 1", start_time, self.id) | |
2067 | ||
2068 | def schedule(self, type, start_time=None, user=None): | |
2069 | assert type in ("rebuild", "test") | |
2070 | ||
2071 | if type == "rebuild": | |
2072 | if self.state == "finished": | |
2073 | return | |
2074 | ||
2075 | self.set_state("new", user=user, log=False) | |
2076 | self.set_start_time(start_time) | |
2077 | ||
2078 | # Log the event. | |
2079 | self.log("schedule_rebuild", user=user) | |
2080 | ||
2081 | elif type == "test": | |
2082 | if not self.state == "finished": | |
2083 | return | |
2084 | ||
2085 | # Create a new job with same build and arch. | |
2086 | job = self.create(self.pakfire, self.build, self.arch, type="test") | |
2087 | job.set_start_time(start_time) | |
2088 | ||
2089 | # Log the event. | |
2090 | self.log("schedule_test_job", test_job=job, user=user) | |
2091 | ||
2092 | return job | |
2093 | ||
2094 | def schedule_test(self, start_not_before=None, user=None): | |
2095 | # XXX to be removed | |
2096 | return self.schedule("test", start_time=start_not_before, user=user) | |
2097 | ||
2098 | def schedule_rebuild(self, start_not_before=None, user=None): | |
2099 | # XXX to be removed | |
2100 | return self.schedule("rebuild", start_time=start_not_before, user=user) | |
2101 | ||
2102 | def get_build_repos(self): | |
2103 | """ | |
2104 | Returns a list of all repositories that should be used when | |
2105 | building this job. | |
2106 | """ | |
2107 | repo_ids = self.db.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s", | |
2108 | self.id) | |
2109 | ||
2110 | if not repo_ids: | |
2111 | return self.distro.get_build_repos() | |
2112 | ||
2113 | repos = [] | |
2114 | for repo in self.distro.repositories: | |
2115 | if repo.id in [r.id for r in repo_ids]: | |
2116 | repos.append(repo) | |
2117 | ||
2118 | return repos or self.distro.get_build_repos() | |
2119 | ||
2120 | def get_repo_config(self): | |
2121 | """ | |
2122 | Get repository configuration file that is sent to the builder. | |
2123 | """ | |
2124 | confs = [] | |
2125 | ||
2126 | for repo in self.get_build_repos(): | |
2127 | confs.append(repo.get_conf()) | |
2128 | ||
2129 | return "\n\n".join(confs) | |
2130 | ||
2131 | def get_config(self): | |
2132 | """ | |
2133 | Get configuration file that is sent to the builder. | |
2134 | """ | |
2135 | confs = [] | |
2136 | ||
2137 | # Add the distribution configuration. | |
2138 | confs.append(self.distro.get_config()) | |
2139 | ||
2140 | # Then add all repositories for this build. | |
2141 | confs.append(self.get_repo_config()) | |
2142 | ||
2143 | return "\n\n".join(confs) | |
2144 | ||
2145 | def used_by(self): | |
2146 | if not self.packages: | |
2147 | return [] | |
2148 | ||
2149 | conditions = [] | |
2150 | args = [] | |
2151 | ||
2152 | for pkg in self.packages: | |
2153 | conditions.append(" pkg_uuid = %s") | |
2154 | args.append(pkg.uuid) | |
2155 | ||
2156 | query = "SELECT DISTINCT job_id AS id FROM jobs_buildroots" | |
2157 | query += " WHERE %s" % " OR ".join(conditions) | |
2158 | ||
2159 | job_ids = self.db.query(query, *args) | |
2160 | ||
2161 | print job_ids | |
2162 | ||
	def resolvdep(self):
		"""Check whether the build dependencies of this job can be resolved.

		Builds a pakfire configuration from this job's distro and
		repository settings and runs the solver against the source
		package.  On a dependency error the job is moved into the
		"dependency_error" state and the solver problem is stored as
		the job message; on success the job is set to "pending"
		(unless it is currently "failed").
		"""
		# Assemble the configuration that would also be sent to the builder.
		config = pakfire.config.Config(files=["general.conf"])
		config.parse(self.get_config())

		# The filename of the source file.
		filename = os.path.join(PACKAGES_DIR, self.build.pkg.path)
		assert os.path.exists(filename), filename

		# Create a new pakfire instance with the configuration for
		# this build.
		p = pakfire.PakfireServer(config=config, arch=self.arch.name)

		# Try to solve the build dependencies.
		try:
			solver = p.resolvdep(filename)

		# Catch dependency errors and log the problem string.
		except DependencyError, e:
			self.state = "dependency_error"
			self.update_message(e)

		else:
			# If the build dependencies can be resolved, we set the build in
			# pending state.
			if solver.status is True:
				# Never resurrect a job that has already failed.
				if self.state in ("failed",):
					return

				self.state = "pending"