]>
Commit | Line | Data |
---|---|---|
f6e6ff79 MT |
1 | #!/usr/bin/python |
2 | ||
3 | import datetime | |
4 | import hashlib | |
5 | import logging | |
6 | import os | |
7 | import re | |
8 | import shutil | |
9 | import uuid | |
10 | ||
11 | import pakfire | |
12 | import pakfire.config | |
13 | import pakfire.packages | |
14 | ||
15 | import base | |
16 | import builders | |
17 | import logs | |
18 | import packages | |
19 | import repository | |
20 | import updates | |
21 | import users | |
22 | ||
23 | from constants import * | |
24 | ||
def import_from_package(_pakfire, filename, distro=None, commit=None, type="release",
	arches=None, check_for_duplicates=True, owner=None):
	"""
	Import a package file into the build service.

	Opens the package at ``filename``, registers it in the database and
	creates a new build (plus its automatic per-arch jobs) for it.

	Arguments:
		_pakfire  -- the backend/application object (passed through to
		             Package/Build constructors).
		filename  -- path to the package file to import.
		distro    -- target distribution; if None it is taken from
		             commit.source.distro (so a commit must be given then).
		commit    -- optional source commit this package was built from.
		type      -- build type, e.g. "release" (default) or "scratch".
		arches    -- optional list of arches for the auto-created jobs;
		             None means all arches the package supports.
		check_for_duplicates -- skip the import if the distribution
		             already has a package with the same NEVR.
		owner     -- optional owner (required by Build.create for
		             scratch builds).

	Returns a (pkg, build) tuple, or None if a duplicate was skipped.
	"""
	if distro is None:
		distro = commit.source.distro

	assert distro

	# Open the package file to read some basic information.
	pkg = pakfire.packages.open(None, None, filename)

	if check_for_duplicates:
		if distro.has_package(pkg.name, pkg.epoch, pkg.version, pkg.release):
			logging.warning("Duplicate package detected: %s. Skipping." % pkg)
			return

	# Open the package and add it to the database.
	pkg = packages.Package.open(_pakfire, filename)
	logging.debug("Created new package: %s" % pkg)

	# Associate the package to the processed commit.
	if commit:
		pkg.commit = commit

	# Create a new build object from the package which
	# is always a release build.
	build = Build.create(_pakfire, pkg, type=type, owner=owner, distro=distro)
	logging.debug("Created new build job: %s" % build)

	# Create all automatic jobs.
	build.create_autojobs(arches=arches)

	return pkg, build
58 | ||
59 | ||
class Builds(base.Object):
	"""
	Collection-level accessor for Build objects.

	All queries go through self.db (MySQL-style %s placeholders) and
	return Build instances (or raw result rows for summaries).
	"""

	def get_by_id(self, id, data=None):
		"""
		Return the Build with the given id.

		data may carry an already-fetched database row to avoid a
		second SELECT.
		"""
		return Build(self.pakfire, id, data=data)

	def get_by_uuid(self, uuid):
		"""
		Look up a build by its UUID. Returns None if it does not exist.
		"""
		build = self.db.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid)

		if build:
			return self.get_by_id(build.id)

	def get_all(self, limit=50):
		"""
		Return the most recently created builds (newest first),
		up to limit entries (no limit if falsy).
		"""
		query = "SELECT * FROM builds ORDER BY time_created DESC"

		if limit:
			query += " LIMIT %d" % limit

		return [self.get_by_id(b.id, b) for b in self.db.query(query)]

	def get_by_user(self, user, type=None, public=None):
		"""
		Return all builds that belong to the given user, newest first.

		type    -- restrict to "scratch" builds (release filtering is
		           not implemented yet, see TODO below).
		public  -- True/False filters on the public flag; None ignores it.
		"""
		args = []
		conditions = []

		if not type or type == "scratch":
			# On scratch builds the user id equals the owner id.
			conditions.append("(builds.type = 'scratch' AND owner_id = %s)")
			args.append(user.id)

		elif not type or type == "release":
			pass # TODO

		if public is True:
			conditions.append("public = 'Y'")
		elif public is False:
			conditions.append("public = 'N'")

		# BUGFIX: this used to read "SELECT builds.* AS id" which is not
		# valid SQL ("* AS id"). We select the whole row so it can be
		# passed to Build() as prefetched data.
		query = "SELECT builds.* FROM builds \
			JOIN packages ON builds.pkg_id = packages.id"

		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		query += " ORDER BY builds.time_created DESC"

		builds = []
		for build in self.db.query(query, *args):
			build = Build(self.pakfire, build.id, build)
			builds.append(build)

		return builds

	def get_by_name(self, name, type=None, public=None, user=None):
		"""
		Return all builds of the package with the given name, sorted
		by package comparison (Build.__cmp__).

		type    -- optional build type filter.
		public  -- True/False filters on the public flag.
		user    -- non-admin users additionally see their own builds
		           even if the public filter would exclude them.
		"""
		args = [name,]
		conditions = [
			"packages.name = %s",
		]

		if type:
			conditions.append("builds.type = %s")
			args.append(type)

		or_conditions = []
		if public is True:
			or_conditions.append("public = 'Y'")
		elif public is False:
			or_conditions.append("public = 'N'")

		if user and not user.is_admin():
			or_conditions.append("builds.owner_id = %s")
			args.append(user.id)

		query = "SELECT builds.id AS id FROM builds \
			JOIN packages ON builds.pkg_id = packages.id"

		if or_conditions:
			# BUGFIX: parenthesize the OR group. SQL AND binds tighter
			# than OR, so without parentheses
			#   name = %s AND public = 'Y' OR owner_id = %s
			# would match ANY build owned by the user, regardless of name.
			conditions.append("(%s)" % " OR ".join(or_conditions))

		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		query += " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC"

		return sorted([Build(self.pakfire, b.id) for b in self.db.query(query, *args)])

	def get_latest_by_name(self, name, type=None, public=None):
		"""
		Return the latest build of the given package name.

		If type is None, "release" builds are preferred over "scratch"
		builds. Returns None if no matching build exists.
		"""
		if type is None:
			types = ("release", "scratch")
		else:
			types = (type,)

		query = "SELECT builds.id AS id FROM builds \
			JOIN packages ON builds.pkg_id = packages.id \
			WHERE builds.type = %s AND packages.name = %s"
		args = [name,]

		if public is True:
			query += " AND builds.public = 'Y'"
		elif public is False:
			query += " AND builds.public = 'N'"

		for type in types:
			res = self.db.query(query, type, *args)
			if not res:
				continue

			builds = [Build(self.pakfire, b.id) for b in res]
			builds.sort(reverse=True)

			return builds[0]

	def get_active_builds(self, name, public=None):
		"""
		Return all builds of the given package name that are currently
		in any repository, newest first.
		"""
		query = "\
			SELECT builds.* FROM builds \
				LEFT JOIN packages ON builds.pkg_id = packages.id \
			WHERE packages.name = %s"
		args = [name,]

		if public is True:
			query += " AND builds.public = %s"
			args.append("Y")
		elif public is False:
			query += " AND builds.public = %s"
			args.append("N")

		# Only builds that are member of at least one repository.
		query += " AND builds.id IN ( \
			SELECT build_id FROM repositories_builds \
		)"

		builds = []
		for row in self.db.query(query, *args):
			b = Build(self.pakfire, row.id, row)
			builds.append(b)

		# Sort the result. Latest build first.
		builds.sort(reverse=True)

		return builds

	def count(self):
		"""
		Return the total number of builds (cached for up to 15 minutes).
		"""
		count = self.cache.get("builds_count")
		if count is None:
			builds = self.db.get("SELECT COUNT(*) AS count FROM builds")

			count = builds.count
			self.cache.set("builds_count", count, 3600 / 4)

		return count

	def needs_test(self, threshold, arch, limit=None, randomize=False):
		"""
		Return release builds (in stable/testing state) whose last
		finished build job for the given arch is older than threshold
		and which have no newer/unfinished job for that arch — i.e.
		candidates for a new test job.
		"""
		query = "SELECT id FROM builds \
			WHERE NOT EXISTS \
				(SELECT * FROM jobs WHERE \
					jobs.build_id = builds.id AND \
					jobs.arch_id = %s AND \
					(jobs.state != 'finished' OR \
					jobs.time_finished >= %s) \
				) \
			AND EXISTS \
				(SELECT * FROM jobs WHERE \
					jobs.build_id = builds.id AND \
					jobs.arch_id = %s AND \
					jobs.type = 'build' AND \
					jobs.state = 'finished' AND \
					jobs.time_finished < %s \
				) \
			AND builds.type = 'release' \
			AND (builds.state = 'stable' OR builds.state = 'testing')"
		args = [arch.id, threshold, arch.id, threshold]

		if randomize:
			query += " ORDER BY RAND()"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		return [Build(self.pakfire, b.id) for b in self.db.query(query, *args)]

	def get_obsolete(self, repo=None):
		"""
		Get all obsoleted builds.

		If repo is True: which are in any repository.
		If repo is some Repository object: which are in this repository.
		"""
		args = []

		if repo is None:
			query = "SELECT id FROM builds WHERE state = 'obsolete'"

		else:
			query = "SELECT build_id AS id FROM repositories_builds \
				JOIN builds ON builds.id = repositories_builds.build_id \
				WHERE builds.state = 'obsolete'"

			if repo and not repo is True:
				query += " AND repositories_builds.repo_id = %s"
				args.append(repo.id)

		res = self.db.query(query, *args)

		builds = []
		for build in res:
			build = Build(self.pakfire, build.id)
			builds.append(build)

		return builds

	def get_changelog(self, name, public=None, limit=5, offset=0):
		"""
		Return release builds of the given package for changelog
		display, newest first, with optional public filter and
		limit/offset pagination.
		"""
		query = "SELECT builds.* FROM builds \
			JOIN packages ON builds.pkg_id = packages.id \
			WHERE \
				builds.type = %s \
			AND \
				packages.name = %s"
		args = ["release", name,]

		if public == True:
			query += " AND builds.public = %s"
			args.append("Y")
		elif public == False:
			query += " AND builds.public = %s"
			args.append("N")

		query += " ORDER BY builds.time_created DESC"

		if limit:
			if offset:
				# MySQL "LIMIT offset, count" form.
				query += " LIMIT %s,%s"
				args += [offset, limit]
			else:
				query += " LIMIT %s"
				args.append(limit)

		builds = []
		for b in self.db.query(query, *args):
			b = Build(self.pakfire, b.id, b)
			builds.append(b)

		builds.sort(reverse=True)

		return builds

	def get_comments(self, limit=10, offset=None, user=None):
		"""
		Return recent build comments (as logs.CommentLogEntry objects),
		optionally restricted to one user, with limit/offset pagination.
		"""
		query = "SELECT * FROM builds_comments \
			JOIN users ON builds_comments.user_id = users.id"
		args = []

		wheres = []
		if user:
			wheres.append("users.id = %s")
			args.append(user.id)

		if wheres:
			query += " WHERE %s" % " AND ".join(wheres)

		# Sort everything.
		query += " ORDER BY time_created DESC"

		# Limits.
		if limit:
			if offset:
				query += " LIMIT %s,%s"
				args.append(offset)
			else:
				query += " LIMIT %s"

			args.append(limit)

		comments = []
		for comment in self.db.query(query, *args):
			comment = logs.CommentLogEntry(self.pakfire, comment)
			comments.append(comment)

		return comments

	def get_build_times_summary(self, name=None, job_type=None):
		"""
		Return per-arch build time statistics (min/max/avg/sum/stddev),
		optionally filtered by package name and/or job type.
		"""
		query = "\
			SELECT \
				builds_times.arch AS arch, \
				MAX(duration) AS maximum, \
				MIN(duration) AS minimum, \
				AVG(duration) AS average, \
				SUM(duration) AS sum, \
				STDDEV_POP(duration) AS stddev \
			FROM builds_times \
				LEFT JOIN builds ON builds_times.build_id = builds.id \
				LEFT JOIN packages ON builds.pkg_id = packages.id"

		args = []
		conditions = []

		# Filter for name.
		if name:
			conditions.append("packages.name = %s")
			args.append(name)

		# Filter by job types.
		# BUGFIX: this used to test "if type:" — the BUILTIN type, which
		# is always truthy — so the condition was always added and None
		# was bound as the parameter when no job_type was requested.
		if job_type:
			conditions.append("builds_times.job_type = %s")
			args.append(job_type)

		# Add conditions.
		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		# Grouping and sorting.
		query += " GROUP BY arch ORDER BY arch DESC"

		return self.db.query(query, *args)
369 | ||
f6e6ff79 MT |
370 | |
371 | class Build(base.Object): | |
	def __init__(self, pakfire, id, data=None):
		"""
		Create a Build handle for the given database id.

		data may carry a pre-fetched database row; if None, the row is
		fetched lazily (and cached) by the `data` property.
		"""
		base.Object.__init__(self, pakfire)

		# ID of this build
		self.id = id

		# Cache data.
		self._data = data
		# Lazily populated caches for related objects.
		self._jobs = None
		self._jobs_test = None
		self._depends_on = None
		self._pkg = None
		self._credits = None
		self._owner = None
		self._update = None
		self._repo = None
		self._distro = None

	def __repr__(self):
		return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.pkg)

	def __cmp__(self, other):
		# Builds order like their source packages (Python 2 comparison).
		assert self.pkg
		assert other.pkg

		return cmp(self.pkg, other.pkg)

	@property
	def cache_key(self):
		# Key under which this build's database row is memcached.
		return "build_%s" % self.id

	def clear_cache(self):
		"""
		Clear the stored data from the cache.
		"""
		self.cache.delete(self.cache_key)
408 | ||
	@classmethod
	def create(cls, pakfire, pkg, type="release", owner=None, distro=None, public=True):
		"""
		Create a new build for the given source package.

		type   -- one of "release", "scratch" or "test".
		owner  -- required for scratch builds; becomes the build owner.
		distro -- target distribution (mandatory).
		public -- whether the build is publicly visible.

		Also creates the build directory, moves the source package into
		it, assigns an update ID, obsoletes older builds of the same
		name and scans the commit message for bug IDs.
		"""
		assert type in ("release", "scratch", "test")
		assert distro, "You need to specify the distribution of this build."

		# Stored as an enum-like Y/N character in the database.
		if public:
			public = "Y"
		else:
			public = "N"

		# Check if scratch build has an owner.
		if type == "scratch" and not owner:
			raise Exception, "Scratch builds require an owner"

		# Set the default priority of this build.
		if type == "release":
			priority = 0

		elif type == "scratch":
			priority = 1

		elif type == "test":
			priority = -1

		id = pakfire.db.execute("""
			INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
			VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid.uuid4(), pkg.id,
			type, distro.id, public, priority)

		# Set the owner of this buildgroup.
		if owner:
			pakfire.db.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
				owner.id, id)

		build = cls(pakfire, id)

		# Log that the build has been created.
		build.log("created", user=owner)

		# Create directory where the files live.
		if not os.path.exists(build.path):
			os.makedirs(build.path)

		# Move package file to the directory of the build.
		source_path = os.path.join(build.path, "src")
		build.pkg.move(source_path)

		# Generate an update id.
		build.generate_update_id()

		# Obsolete all other builds with the same name to track updates.
		build.obsolete_others()

		# Search for possible bug IDs in the commit message.
		build.search_for_bugs()

		return build
466 | ||
	def delete(self):
		"""
		Deletes this build including all jobs, packages and the source
		package.
		"""
		# If the build is in a repository, we need to remove it.
		if self.repo:
			self.repo.rem_build(self)

		for job in self.jobs + self.test_jobs:
			job.delete()

		if self.pkg:
			self.pkg.delete()

		# Delete everything related to this build.
		self.__delete_bugs()
		self.__delete_comments()
		self.__delete_history()
		self.__delete_watchers()

		# Delete the build itself.
		self.db.execute("DELETE FROM builds WHERE id = %s", self.id)
		self.clear_cache()

	def __delete_bugs(self):
		"""
		Delete all associated bugs.
		"""
		self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s", self.id)

	def __delete_comments(self):
		"""
		Delete all comments.
		"""
		self.db.execute("DELETE FROM builds_comments WHERE build_id = %s", self.id)

	def __delete_history(self):
		"""
		Delete the repository history.
		"""
		self.db.execute("DELETE FROM repositories_history WHERE build_id = %s", self.id)

	def __delete_watchers(self):
		"""
		Delete all watchers.
		"""
		self.db.execute("DELETE FROM builds_watchers WHERE build_id = %s", self.id)
515 | ||
	def reset(self):
		"""
		Resets the whole build so it can start again (as it has never
		been started).

		Resets all jobs, deletes comments/history/watchers (bugs are
		deliberately kept, see commented line) and moves the build back
		into the "building" state.
		"""
		for job in self.jobs:
			job.reset()

		#self.__delete_bugs()
		self.__delete_comments()
		self.__delete_history()
		self.__delete_watchers()

		self.state = "building"

		# XXX empty log
532 | ||
	@property
	def data(self):
		"""
		Lazy fetching of data for this object.

		Checks the memcache first; falls back to one SELECT for the
		whole row and stores it back in the cache.
		"""
		if self._data is None:
			data = self.cache.get(self.cache_key)
			if not data:
				# Fetch the whole row in one call.
				data = self.db.get("SELECT * FROM builds WHERE id = %s", self.id)
				self.cache.set(self.cache_key, data)

			self._data = data
			assert self._data

		return self._data

	@property
	def info(self):
		"""
		A set of information that is sent to the XMLRPC client.
		"""
		return { "uuid" : self.uuid }

	def log(self, action, user=None, bug_id=None):
		"""
		Append an entry to the build history (builds_history table).
		"""
		user_id = None
		if user:
			user_id = user.id

		self.db.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
			VALUES(%s, %s, %s, NOW(), %s)", self.id, action, user_id, bug_id)

	@property
	def uuid(self):
		"""
		The UUID of this build.
		"""
		return self.data.uuid
571 | ||
	@property
	def pkg(self):
		"""
		Get package that is to be built in the build.
		"""
		if self._pkg is None:
			self._pkg = packages.Package(self.pakfire, self.data.pkg_id)

		return self._pkg

	@property
	def name(self):
		# Human-readable name like "zlib-1.2.3-2".
		return "%s-%s" % (self.pkg.name, self.pkg.friendly_version)

	@property
	def type(self):
		"""
		The type of this build.
		"""
		return self.data.type

	@property
	def owner_id(self):
		"""
		The ID of the owner of this build.
		"""
		return self.data.owner_id

	@property
	def owner(self):
		"""
		The owner of this build.

		Returns None if the build has no owner (release builds may not).
		"""
		if not self.owner_id:
			return

		if self._owner is None:
			self._owner = self.pakfire.users.get_by_id(self.owner_id)
			assert self._owner

		return self._owner

	@property
	def distro_id(self):
		return self.data.distro_id

	@property
	def distro(self):
		# Lazily resolved distribution object.
		if self._distro is None:
			self._distro = self.pakfire.distros.get_by_id(self.distro_id)
			assert self._distro

		return self._distro

	@property
	def user(self):
		# Only scratch builds belong to a user; None otherwise.
		if self.type == "scratch":
			return self.owner
630 | ||
	def get_depends_on(self):
		# The build this build depends on, or None.
		if self.data.depends_on and self._depends_on is None:
			self._depends_on = Build(self.pakfire, self.data.depends_on)

		return self._depends_on

	def set_depends_on(self, build):
		# Persist the dependency and invalidate the cached row.
		self.db.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
			build.id, self.id)
		self.clear_cache()

		# Update cache.
		# NOTE(review): assumes the db row object supports item assignment.
		self._depends_on = build
		self._data["depends_on"] = build.id

	depends_on = property(get_depends_on, set_depends_on)

	@property
	def created(self):
		# datetime of creation (from the time_created column).
		return self.data.time_created
651 | ||
eedc6432 MT |
	@property
	def date(self):
		# Creation date (date part of the time_created timestamp).
		return self.created.date()

	@property
	def public(self):
		"""
		Is this build public?
		"""
		return self.data.public == "Y"

	@property
	def size(self):
		"""
		Returns the size on disk of this build.

		Sum of the source package size plus the sizes of all build jobs.
		"""
		s = 0

		# Add the source package.
		if self.pkg:
			s += self.pkg.size

		# Add all jobs.
		s += sum((j.size for j in self.jobs))

		return s
678 | ||
f6e6ff79 MT |
679 | #@property |
680 | #def state(self): | |
681 | # # Cache all states. | |
682 | # states = [j.state for j in self.jobs] | |
683 | # | |
684 | # target_state = "unknown" | |
685 | # | |
686 | # # If at least one job has failed, the whole build has failed. | |
687 | # if "failed" in states: | |
688 | # target_state = "failed" | |
689 | # | |
690 | # # It at least one of the jobs is still running, the whole | |
691 | # # build is in running state. | |
692 | # elif "running" in states: | |
693 | # target_state = "running" | |
694 | # | |
695 | # # If all jobs are in the finished state, we turn into finished | |
696 | # # state as well. | |
697 | # elif all([s == "finished" for s in states]): | |
698 | # target_state = "finished" | |
699 | # | |
700 | # return target_state | |
701 | ||
	def auto_update_state(self):
		"""
		Check if the state of this build can be updated and perform
		the change if possible.
		"""
		# Do not change the broken/obsolete state automatically.
		if self.state in ("broken", "obsolete"):
			return

		# Builds in a stable repository are stable.
		if self.repo and self.repo.type == "stable":
			self.update_state("stable")
			return

		# If any of the build jobs are finished, the build will be put in testing
		# state.
		for job in self.jobs:
			if job.state == "finished":
				self.update_state("testing")
				break
721 | ||
	def update_state(self, state, user=None, remove=False):
		"""
		Move this build into the given state and apply side effects:

		* "broken" forces removal from its repository and aborts all
		  unfinished jobs,
		* remove=True takes the build out of its repository,
		* a release build entering "testing" is added to the first
		  repository of its distribution (if it is in none yet).
		"""
		assert state in ("stable", "testing", "obsolete", "broken")

		self.db.execute("UPDATE builds SET state = %s WHERE id = %s", state, self.id)

		# Keep the locally cached row in sync.
		if self._data:
			self._data["state"] = state

		# In broken state, the removal from the repository is forced and
		# all jobs that are not finished yet will be aborted.
		if state == "broken":
			remove = True

			for job in self.jobs:
				if job.state in ("new", "pending", "running", "dependency_error"):
					job.state = "aborted"

		# If this build is in a repository, it will leave it.
		if remove and self.repo:
			self.repo.rem_build(self)

		# If a release build is now in testing state, we put it into the
		# first repository of the distribution.
		elif self.type == "release" and state == "testing":
			# If the build is not in a repository, yet and if there is
			# a first repository, we put the build there.
			if not self.repo and self.distro.first_repo:
				self.distro.first_repo.add_build(self, user=user)

	@property
	def state(self):
		# Current state string as stored in the database row.
		return self.data.state
754 | ||
9fa1787c MT |
	def is_broken(self):
		# Convenience predicate for the "broken" state.
		return self.state == "broken"

	def obsolete_others(self):
		"""
		Mark all other release builds of the same package name as
		obsolete. No-op for non-release builds.
		"""
		if not self.type == "release":
			return

		for build in self.pakfire.builds.get_by_name(self.pkg.name, type="release"):
			# Don't modify ourself.
			if self.id == build.id:
				continue

			# Don't touch broken builds.
			if build.state in ("obsolete", "broken"):
				continue

			# Obsolete the build.
			build.update_state("obsolete")
773 | ||
774 | def set_severity(self, severity): | |
775 | self.db.execute("UPDATE builds SET severity = %s WHERE id = %s", state, self.id) | |
776 | ||
777 | if self._data: | |
778 | self._data["severity"] = severity | |
779 | self.clear_cache() | |
780 | ||
781 | def get_severity(self): | |
782 | return self.data.severity | |
783 | ||
784 | severity = property(get_severity, set_severity) | |
785 | ||
	@property
	def commit(self):
		# The source commit of the package (None if the package has none).
		if self.pkg and self.pkg.commit:
			return self.pkg.commit

	def update_message(self, msg):
		"""
		Persist a new message for this build and keep caches in sync.
		"""
		self.db.execute("UPDATE builds SET message = %s WHERE id = %s", msg, self.id)

		if self._data:
			self._data["message"] = msg
			self.clear_cache()
797 | ||
	def has_perm(self, user):
		"""
		Check, if the given user has the right to perform administrative
		operations on this build.

		Admins always may; critical-path packages additionally require
		the "manage_critical_path" permission; otherwise the scratch
		build owner or the package maintainer is allowed.
		"""
		if user is None:
			return False

		if user.is_admin():
			return True

		# Check if the user is allowed to manage packages from the critical path.
		if self.critical_path and not user.has_perm("manage_critical_path"):
			return False

		# Search for maintainers...

		# Scratch builds.
		if self.type == "scratch":
			# The owner of a scratch build has the right to do anything with it.
			if self.owner_id == user.id:
				return True

		# Release builds.
		elif self.type == "release":
			# The maintainer also is allowed to manage the build.
			if self.pkg.maintainer == user:
				return True

		# Deny permission for all other cases.
		return False
829 | ||
	@property
	def message(self):
		"""
		The message shown for this build: an explicitly stored message,
		or else subject (+ body) of the source commit, with a leading
		"<pkgname>: " prefix stripped.
		"""
		message = ""

		if self.data.message:
			message = self.data.message

		elif self.commit:
			if self.commit.message:
				message = "\n".join((self.commit.subject, self.commit.message))
			else:
				message = self.commit.subject

		# Strip a conventional "name: " prefix from commit subjects.
		prefix = "%s: " % self.pkg.name
		if message.startswith(prefix):
			message = message[len(prefix):]

		return message
848 | ||
	def get_priority(self):
		# Scheduling priority as stored in the database row.
		return self.data.priority

	def set_priority(self, priority):
		"""
		Persist a new scheduling priority (-2 .. 2) for this build.
		"""
		assert priority in (-2, -1, 0, 1, 2)

		self.db.execute("UPDATE builds SET priority = %s WHERE id = %s", priority,
			self.id)
		self.clear_cache()

		if self._data:
			self._data["priority"] = priority

	priority = property(get_priority, set_priority)
863 | ||
	@property
	def path(self):
		"""
		Filesystem path of this build's directory.

		Scratch builds live under BUILD_SCRATCH_DIR/<uuid>, release
		builds under BUILD_RELEASE_DIR/<name>/<n>-<e>-<v>-<r>.
		"""
		path = []
		if self.type == "scratch":
			path.append(BUILD_SCRATCH_DIR)
			path.append(self.uuid)

		elif self.type == "release":
			path.append(BUILD_RELEASE_DIR)
			path.append("%s/%s-%s-%s" % \
				(self.pkg.name, self.pkg.epoch, self.pkg.version, self.pkg.release))

		else:
			raise Exception, "Unknown build type: %s" % self.type

		return os.path.join(*path)
880 | ||
	@property
	def source_filename(self):
		# Basename of the source package file.
		return os.path.basename(self.pkg.path)

	@property
	def download_prefix(self):
		# Base URL under which packages are downloadable.
		return "/".join((self.pakfire.settings.get("download_baseurl"), "packages"))

	@property
	def source_download(self):
		# Full download URL of the source package.
		return "/".join((self.download_prefix, self.pkg.path))

	@property
	def source_hash_sha512(self):
		# SHA-512 checksum of the source package.
		return self.pkg.hash_sha512
896 | ||
	@property
	def link(self):
		"""
		HTML link to this build, followed by links to its jobs, e.g.:
		zlib-1.2.3-2.ip3 [src, i686, ...]
		"""
		# XXX maybe this should rather live in a uimodule.
		# zlib-1.2.3-2.ip3 [src, i686, blah...]
		s = """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
			(self.state, self.type, self.uuid, self.name)

		s_jobs = []
		for job in self.jobs:
			s_jobs.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
				(job.state, job.type, job.uuid, job.arch.name))

		if s_jobs:
			s += " [%s]" % ", ".join(s_jobs)

		return s
913 | ||
	@property
	def supported_arches(self):
		# Arches the source package can be built for.
		return self.pkg.supported_arches

	@property
	def critical_path(self):
		# Whether the package is on the distribution's critical path.
		return self.pkg.critical_path

	def get_jobs(self, type=None):
		"""
		Returns a list of jobs of this build.
		"""
		return self.pakfire.jobs.get_by_build(self.id, self, type=type)
927 | ||
	@property
	def jobs(self):
		"""
		Get a list of all build jobs that are in this build.
		"""
		if self._jobs is None:
			self._jobs = self.get_jobs(type="build")

		return self._jobs

	@property
	def test_jobs(self):
		# All test jobs of this build (lazily fetched and cached).
		if self._jobs_test is None:
			self._jobs_test = self.get_jobs(type="test")

		return self._jobs_test
944 | ||
945 | @property | |
946 | def all_jobs_finished(self): | |
947 | ret = True | |
948 | ||
949 | for job in self.jobs: | |
950 | if not job.state == "finished": | |
951 | ret = False | |
952 | break | |
953 | ||
954 | return ret | |
955 | ||
	def create_autojobs(self, arches=None, type="build"):
		"""
		Create one job per architecture for this build and return the
		list of newly created jobs.

		arches defaults to all arches the package supports; the "src"
		pseudo-arch is always skipped.
		"""
		jobs = []

		# Arches may be passed to this function. If not we use all arches
		# this package supports.
		if arches is None:
			arches = self.supported_arches

		# Create a new job for every given archirecture.
		for arch in self.pakfire.arches.expand(arches):
			# Don't create jobs for src.
			if arch.name == "src":
				continue

			job = self.add_job(arch, type=type)
			jobs.append(job)

		# Return all newly created jobs.
		return jobs

	def add_job(self, arch, type="build"):
		"""
		Create a single job for the given arch and add it to the local
		job cache (if populated).
		"""
		job = Job.create(self.pakfire, self, arch, type=type)

		# Add new job to cache.
		if self._jobs:
			self._jobs.append(job)

		return job
984 | ||
985 | ## Update stuff | |
986 | ||
	@property
	def update_id(self):
		"""
		The human-readable update ID of a release build, e.g.
		"DISTRONAME-2012-0001". None for non-release builds.
		"""
		if not self.type == "release":
			return

		# Generate an update ID if none does exist, yet.
		self.generate_update_id()

		s = [
			"%s" % self.distro.name.replace(" ", "").upper(),
			"%04d" % (self.data.update_year or 0),
			"%04d" % (self.data.update_num or 0),
		]

		return "-".join(s)

	def generate_update_id(self):
		"""
		Assign the next free update number of the current year to this
		release build. No-op for non-release builds or if an update
		number is already set.
		"""
		if not self.type == "release":
			return

		if self.data.update_num:
			return

		# Highest update number assigned so far this year.
		update = self.db.get("SELECT update_num AS num FROM builds \
			WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")

		if update:
			update_num = update.num + 1
		else:
			update_num = 1

		self.db.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
			WHERE id = %s", update_num, self.id)
1020 | ||
1021 | ## Comment stuff | |
1022 | ||
	def get_comments(self, limit=10, offset=0):
		"""
			Return all comments of this build as CommentLogEntry
			objects, oldest first.

			NOTE(review): the limit and offset arguments are currently
			accepted but ignored - the query always returns all rows.
		"""
		query = "SELECT * FROM builds_comments \
			JOIN users ON builds_comments.user_id = users.id \
			WHERE build_id = %s ORDER BY time_created ASC"

		comments = []
		for comment in self.db.query(query, self.id):
			# Wrap each raw row in a log entry object.
			comment = logs.CommentLogEntry(self.pakfire, comment)
			comments.append(comment)

		return comments
1034 | ||
1035 | def add_comment(self, user, text, credit): | |
1036 | # Add the new comment to the database. | |
1037 | id = self.db.execute("INSERT INTO \ | |
1038 | builds_comments(build_id, user_id, text, credit, time_created) \ | |
1039 | VALUES(%s, %s, %s, %s, NOW())", | |
1040 | self.id, user.id, text, credit) | |
1041 | ||
1042 | # Update the credit cache. | |
1043 | if not self._credits is None: | |
1044 | self._credits += credit | |
1045 | ||
1046 | # Send the new comment to all watchers and stuff. | |
1047 | self.send_comment_message(id) | |
1048 | ||
1049 | # Return the ID of the newly created comment. | |
1050 | return id | |
1051 | ||
1052 | @property | |
1053 | def score(self): | |
1054 | # XXX UPDATE THIS | |
1055 | if self._credits is None: | |
1056 | # Get the sum of the credits from the database. | |
1057 | query = self.db.get( | |
1058 | "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s", | |
1059 | self.id | |
1060 | ) | |
1061 | ||
1062 | self._credits = query.credits or 0 | |
1063 | ||
1064 | return self._credits | |
1065 | ||
1066 | @property | |
1067 | def credits(self): | |
1068 | # XXX COMPAT | |
1069 | return self.score | |
1070 | ||
1071 | def get_commenters(self): | |
1072 | users = self.db.query("SELECT DISTINCT users.id AS id FROM builds_comments \ | |
1073 | JOIN users ON builds_comments.user_id = users.id \ | |
1074 | WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \ | |
1075 | AND NOT users.activated = 'Y' ORDER BY users.id", self.id) | |
1076 | ||
1077 | return [users.User(self.pakfire, u.id) for u in users] | |
1078 | ||
1079 | def send_comment_message(self, comment_id): | |
1080 | comment = self.db.get("SELECT * FROM builds_comments WHERE id = %s", | |
1081 | comment_id) | |
1082 | ||
1083 | assert comment | |
1084 | assert comment.build_id == self.id | |
1085 | ||
1086 | # Get user who wrote the comment. | |
1087 | user = self.pakfire.users.get_by_id(comment.user_id) | |
1088 | ||
1089 | format = { | |
1090 | "build_name" : self.name, | |
1091 | "user_name" : user.realname, | |
1092 | } | |
1093 | ||
1094 | # XXX create beautiful message | |
1095 | ||
1096 | self.pakfire.messages.send_to_all(self.message_recipients, | |
1097 | N_("%(user_name)s commented on %(build_name)s"), | |
1098 | comment.text, format) | |
1099 | ||
1100 | ## Logging stuff | |
1101 | ||
1102 | def get_log(self, comments=True, repo=True, limit=None): | |
1103 | entries = [] | |
1104 | ||
fd681905 MT |
1105 | # Created entry. |
1106 | created_entry = logs.CreatedLogEntry(self.pakfire, self) | |
1107 | entries.append(created_entry) | |
1108 | ||
f6e6ff79 MT |
1109 | if comments: |
1110 | entries += self.get_comments(limit=limit) | |
1111 | ||
1112 | if repo: | |
1113 | entries += self.get_repo_moves(limit=limit) | |
1114 | ||
1115 | # Sort all entries in chronological order. | |
1116 | entries.sort() | |
1117 | ||
1118 | if limit: | |
1119 | entries = entries[:limit] | |
1120 | ||
1121 | return entries | |
1122 | ||
1123 | ## Watchers stuff | |
1124 | ||
1125 | def get_watchers(self): | |
1126 | query = self.db.query("SELECT DISTINCT user_id AS id FROM builds_watchers \ | |
1127 | JOIN users ON builds_watchers.user_id = users.id \ | |
1128 | WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \ | |
1129 | AND users.activated = 'Y' ORDER BY users.id", self.id) | |
1130 | ||
1131 | return [users.User(self.pakfire, u.id) for u in query] | |
1132 | ||
1133 | def add_watcher(self, user): | |
1134 | # Don't add a user twice. | |
1135 | if user in self.get_watchers(): | |
1136 | return | |
1137 | ||
1138 | self.db.execute("INSERT INTO builds_watchers(build_id, user_id) \ | |
1139 | VALUES(%s, %s)", self.id, user.id) | |
1140 | ||
1141 | @property | |
1142 | def message_recipients(self): | |
1143 | ret = [] | |
1144 | ||
1145 | for watcher in self.get_watchers(): | |
1146 | ret.append("%s <%s>" % (watcher.realname, watcher.email)) | |
1147 | ||
1148 | return ret | |
1149 | ||
1150 | @property | |
1151 | def update(self): | |
1152 | if self._update is None: | |
1153 | update = self.db.get("SELECT update_id AS id FROM updates_builds \ | |
1154 | WHERE build_id = %s", self.id) | |
1155 | ||
1156 | if update: | |
1157 | self._update = updates.Update(self.pakfire, update.id) | |
1158 | ||
1159 | return self._update | |
1160 | ||
1161 | @property | |
1162 | def repo(self): | |
1163 | if self._repo is None: | |
1164 | repo = self.db.get("SELECT repo_id AS id FROM repositories_builds \ | |
1165 | WHERE build_id = %s", self.id) | |
1166 | ||
1167 | if repo: | |
1168 | self._repo = repository.Repository(self.pakfire, repo.id) | |
1169 | ||
1170 | return self._repo | |
1171 | ||
	def get_repo_moves(self, limit=None):
		"""
			Return all repository move events of this build as
			RepositoryLogEntry objects, oldest first.

			NOTE(review): the limit argument is currently accepted but
			ignored - the query always returns all rows.
		"""
		query = "SELECT * FROM repositories_history \
			WHERE build_id = %s ORDER BY time ASC"

		actions = []
		for action in self.db.query(query, self.id):
			# Wrap each raw row in a log entry object.
			action = logs.RepositoryLogEntry(self.pakfire, action)
			actions.append(action)

		return actions
1182 | ||
1183 | @property | |
1184 | def is_loose(self): | |
1185 | if self.repo: | |
1186 | return False | |
1187 | ||
1188 | return True | |
1189 | ||
1190 | @property | |
1191 | def repo_time(self): | |
1192 | repo = self.db.get("SELECT time_added FROM repositories_builds \ | |
1193 | WHERE build_id = %s", self.id) | |
1194 | ||
1195 | if repo: | |
1196 | return repo.time_added | |
1197 | ||
1198 | def get_auto_move(self): | |
1199 | return self.data.auto_move == "Y" | |
1200 | ||
1201 | def set_auto_move(self, state): | |
1202 | if state: | |
1203 | state = "Y" | |
1204 | else: | |
1205 | state = "N" | |
1206 | ||
1207 | self.db.execute("UPDATE builds SET auto_move = %s WHERE id = %s", self.id) | |
1208 | if self._data: | |
1209 | self._data["auto_move"] = state | |
1210 | ||
1211 | auto_move = property(get_auto_move, set_auto_move) | |
1212 | ||
	@property
	def can_move_forward(self):
		"""
			Whether this build may be moved into the next repository.

			A build can move forward when its repository has a
			successor and either its score is high enough, the current
			repository requires no minimum time, or the build has spent
			the required minimum time in it.
		"""
		# A build that is in no repository cannot be moved at all.
		if not self.repo:
			return False

		# If there is no next repository, we cannot move anything.
		next_repo = self.repo.next()

		if not next_repo:
			return False

		# If the needed amount of score is reached, we can move forward.
		if self.score >= next_repo.score_needed:
			return True

		# If the repository does not require a minimal time,
		# we can move forward immediately.
		if not self.repo.time_min:
			return True

		# Otherwise compare how long the build has been in the repository
		# (computed by the database server) with the required minimum.
		query = self.db.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
			WHERE build_id = %s", self.id)
		duration = query.duration

		if duration >= self.repo.time_min:
			return True

		return False
1241 | ||
1242 | ## Bugs | |
1243 | ||
1244 | def get_bug_ids(self): | |
1245 | query = self.db.query("SELECT bug_id FROM builds_bugs \ | |
1246 | WHERE build_id = %s", self.id) | |
1247 | ||
1248 | return [b.bug_id for b in query] | |
1249 | ||
1250 | def add_bug(self, bug_id, user=None, log=True): | |
1251 | # Check if this bug is already in the list of bugs. | |
1252 | if bug_id in self.get_bug_ids(): | |
1253 | return | |
1254 | ||
1255 | self.db.execute("INSERT INTO builds_bugs(build_id, bug_id) \ | |
1256 | VALUES(%s, %s)", self.id, bug_id) | |
1257 | ||
1258 | # Log the event. | |
1259 | if log: | |
1260 | self.log("bug_added", user=user, bug_id=bug_id) | |
1261 | ||
1262 | def rem_bug(self, bug_id, user=None, log=True): | |
1263 | self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \ | |
1264 | bug_id = %s", self.id, bug_id) | |
1265 | ||
1266 | # Log the event. | |
1267 | if log: | |
1268 | self.log("bug_removed", user=user, bug_id=bug_id) | |
1269 | ||
1270 | def search_for_bugs(self): | |
1271 | if not self.commit: | |
1272 | return | |
1273 | ||
1274 | pattern = re.compile(r"(bug\s?|#)(\d+)") | |
1275 | ||
1276 | for txt in (self.commit.subject, self.commit.message): | |
1277 | for bug in re.finditer(pattern, txt): | |
1278 | try: | |
1279 | bugid = int(bug.group(2)) | |
1280 | except ValueError: | |
1281 | continue | |
1282 | ||
1283 | # Check if a bug with the given ID exists in BZ. | |
1284 | bug = self.pakfire.bugzilla.get_bug(bugid) | |
1285 | if not bug: | |
1286 | continue | |
1287 | ||
1288 | self.add_bug(bugid) | |
1289 | ||
1290 | def get_bugs(self): | |
1291 | bugs = [] | |
1292 | for bug_id in self.get_bug_ids(): | |
1293 | bug = self.pakfire.bugzilla.get_bug(bug_id) | |
1294 | if not bug: | |
1295 | continue | |
1296 | ||
1297 | bugs.append(bug) | |
1298 | ||
1299 | return bugs | |
1300 | ||
	def _update_bugs_helper(self, repo):
		"""
			This function takes a new status and generates messages that
			are appended to all bugs.

			The status/resolution/comment template is looked up in
			BUG_MESSAGES by the type of the given repository;
			repository types without a template are silently ignored.
		"""
		try:
			kwargs = BUG_MESSAGES[repo.type].copy()
		except KeyError:
			return

		baseurl = self.pakfire.settings.get("baseurl", "")
		args = {
			"build_url"    : "%s/build/%s" % (baseurl, self.uuid),
			"distro_name"  : self.distro.name,
			"package_name" : self.name,
			"repo_name"    : repo.name,
		}
		# Fill the comment template with the build information.
		kwargs["comment"] = kwargs["comment"] % args

		self.update_bugs(**kwargs)
1321 | ||
1322 | def _update_bug(self, bug_id, status=None, resolution=None, comment=None): | |
1323 | self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \ | |
1324 | VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment) | |
1325 | ||
1326 | def update_bugs(self, status, resolution=None, comment=None): | |
1327 | # Update all bugs linked to this build. | |
1328 | for bug_id in self.get_bug_ids(): | |
1329 | self._update_bug(bug_id, status=status, resolution=resolution, comment=comment) | |
1330 | ||
1331 | ||
class Jobs(base.Object):
	"""
		Collection class providing queries over all build jobs.
	"""

	def get_by_id(self, id, data=None):
		"""
			Return the Job with the given ID. An already fetched
			database row may be passed in "data" to avoid a re-query.
		"""
		return Job(self.pakfire, id, data)

	def get_by_uuid(self, uuid):
		"""Return the job with the given UUID, or None."""
		job = self.db.get("SELECT id FROM jobs WHERE uuid = %s", uuid)

		if job:
			return self.get_by_id(job.id)

	def get_by_build(self, build_id, build=None, type=None):
		"""
			Get all jobs in the specified build, optionally restricted
			to one job type, in job sort order.
		"""
		query = "SELECT * FROM jobs WHERE build_id = %s"
		args = [build_id,]

		if type:
			query += " AND type = %s"
			args.append(type)

		jobs = []
		for job in self.db.query(query, *args):
			job = Job(self.pakfire, job.id, job)

			# If the Build object was set, we set it so it won't be retrieved
			# from the database again.
			if build:
				job._build = build

			jobs.append(job)

		# Return sorted list of jobs.
		return sorted(jobs)

	def get_active(self, host_id=None, builder=None, states=None):
		"""
			Return all jobs that are currently being processed,
			optionally restricted to one builder (given either by
			host_id or as a Builder object).
		"""
		if builder:
			host_id = builder.id

		if states is None:
			states = ["dispatching", "running", "uploading"]

		query = "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states))
		args = list(states)

		# BUG FIX: host_id used to be interpolated directly into the query
		# string; bind it as a parameter instead.
		if host_id:
			query += " AND builder_id = %s"
			args.append(host_id)

		# Running jobs first, then uploading, dispatching, pending, new.
		query += " ORDER BY \
			CASE \
				WHEN jobs.state = 'running' THEN 0 \
				WHEN jobs.state = 'uploading' THEN 1 \
				WHEN jobs.state = 'dispatching' THEN 2 \
				WHEN jobs.state = 'pending' THEN 3 \
				WHEN jobs.state = 'new' THEN 4 \
			END, time_started ASC"

		return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]

	def get_next_iter(self, *args, **kwargs):
		"""Iterator variant of get_next()."""
		return iter(self.get_next(*args, **kwargs))

	def get_next(self, arches=None, builder=None, limit=None, offset=None, type=None,
			state=None, states=None, max_tries=None):
		"""
			Return the queue of jobs that should be processed next,
			most urgent first.

			The queue can be restricted by architectures, the
			capabilities of a builder, job type, state(s) and the
			maximum number of tries; "limit" caps the result size.
		"""
		# By default look at jobs that are ready or brand new.
		if state is None and states is None:
			states = ["pending", "new"]

		# A builder implies the set of architectures it can build.
		if builder and arches is None:
			arches = builder.get_arches()

		# Never hand out jobs that must not be started yet.
		query = "SELECT jobs.* FROM jobs \
			JOIN builds ON jobs.build_id = builds.id \
			WHERE \
				(start_not_before IS NULL OR start_not_before <= NOW())"
		args = []

		if arches:
			query += " AND jobs.arch_id IN (%s)" % ", ".join(["%s"] * len(arches))
			args.extend([a.id for a in arches])

		if builder:
			#query += " AND (jobs.builder_id = %s OR jobs.builder_id IS NULL)"
			#args.append(builder.id)

			# Check out which types of builds this builder builds.
			build_types = []
			for build_type in builder.build_types:
				if build_type == "release":
					build_types.append("(builds.type = 'release' AND jobs.type = 'build')")
				elif build_type == "scratch":
					build_types.append("(builds.type = 'scratch' AND jobs.type = 'build')")
				elif build_type == "test":
					build_types.append("jobs.type = 'test'")

			if build_types:
				query += " AND (%s)" % " OR ".join(build_types)

		if max_tries:
			query += " AND jobs.max_tries <= %s"
			args.append(max_tries)

		if state:
			query += " AND jobs.state = %s"
			args.append(state)

		if states:
			query += " AND jobs.state IN (%s)" % ", ".join(["%s"] * len(states))
			args.extend(states)

		if type:
			query += " AND jobs.type = %s"
			args.append(type)

		# Order builds.
		# Release builds and scratch builds are more important than test builds.
		# Builds are sorted by priority and older builds are preferred.
		query += " ORDER BY \
			CASE \
				WHEN jobs.state = 'pending' THEN 0 \
				WHEN jobs.state = 'new' THEN 1 \
			END, \
			CASE \
				WHEN jobs.type = 'build' THEN 0 \
				WHEN jobs.type = 'test' THEN 1 \
			END, \
			builds.priority DESC, jobs.time_created ASC"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		jobs = []
		for row in self.db.query(query, *args):
			job = self.pakfire.jobs.get_by_id(row.id, row)
			jobs.append(job)

		return jobs

	def get_latest(self, arch=None, builder=None, limit=None, age=None, date=None):
		"""
			Return finished, failed or aborted jobs, newest first,
			optionally filtered by architecture, builder, a single
			date ("YYYY-MM-DD") or a maximum age.
		"""
		query = "SELECT * FROM jobs"
		args = []

		where = ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]

		if arch:
			where.append("arch_id = %s")
			args.append(arch.id)

		if builder:
			where.append("builder_id = %s")
			args.append(builder.id)

		if date:
			try:
				year, month, day = date.split("-", 2)
				date = datetime.date(int(year), int(month), int(day))
			except ValueError:
				# Malformed dates are silently ignored.
				pass
			else:
				where.append("(DATE(time_created) = %s OR \
					DATE(time_started) = %s OR DATE(time_finished) = %s)")
				args += (date, date, date)

		if age:
			# NOTE(review): "age" is interpolated into the query (an SQL
			# interval like "24 HOUR" cannot be bound) and must therefore
			# always come from trusted code, never from user input.
			where.append("time_finished >= DATE_SUB(NOW(), INTERVAL %s)" % age)

		if where:
			query += " WHERE %s" % " AND ".join(where)

		query += " ORDER BY time_finished DESC"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]

	def get_average_build_time(self):
		"""
			Returns the average build time of all finished builds from the
			last 3 months. The value is cached for one hour.
		"""
		cache_key = "jobs_avg_build_time"

		build_time = self.cache.get(cache_key)
		if not build_time:
			result = self.db.get("SELECT AVG(time_finished - time_started) as average \
				FROM jobs WHERE type = 'build' AND state = 'finished' AND \
				time_finished >= DATE_SUB(NOW(), INTERVAL 3 MONTH)")

			build_time = result.average or 0
			self.cache.set(cache_key, build_time, 3600)

		return build_time

	def count(self, *states):
		"""
			Count the jobs that are in any of the given states (all
			jobs if no state is given). Cached for one minute.
		"""
		states = sorted(states)

		cache_key = "jobs_count_%s" % ("-".join(states) or "all")

		count = self.cache.get(cache_key)
		if count is None:
			query = "SELECT COUNT(*) AS count FROM jobs"
			args = []

			if states:
				query += " WHERE %s" % " OR ".join("state = %s" for s in states)
				args += states

			jobs = self.db.get(query, *args)

			count = jobs.count
			self.cache.set(cache_key, count, 60)

		return count
1550 | ||
1551 | ||
1552 | class Job(base.Object): | |
	def __init__(self, pakfire, id, data=None):
		"""
			Initialize a Job proxy for the database row with the given
			ID. An already fetched row may be passed in "data".
		"""
		base.Object.__init__(self, pakfire)

		# The ID of this Job object.
		self.id = id

		# Cache the data of this object.
		self._data = data
		# Lazily resolved Build this job belongs to.
		self._build = None
		# Lazily resolved Builder this job is assigned to.
		self._builder = None
		# Cached list of packages uploaded for this job.
		self._packages = None
		# Cached list of attached log files.
		self._logfiles = None
1565 | ||
	def __str__(self):
		return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.name)

	def __cmp__(self, other):
		"""
			Job ordering (Python 2 comparison protocol): build jobs
			sort before test jobs; within one build, jobs are ordered
			by architecture; otherwise by package, then creation time.
		"""
		# Build jobs always come before test jobs.
		if self.type == "build" and other.type == "test":
			return -1
		elif self.type == "test" and other.type == "build":
			return 1

		if self.build_id == other.build_id:
			return cmp(self.arch, other.arch)

		ret = cmp(self.pkg, other.pkg)

		# Fall back to the creation time when the packages compare equal.
		if not ret:
			ret = cmp(self.time_created, other.time_created)

		return ret
1584 | ||
1585 | @property | |
1586 | def distro(self): | |
1587 | assert self.build.distro | |
1588 | return self.build.distro | |
1589 | ||
1590 | @property | |
1591 | def cache_key(self): | |
1592 | return "job_%s" % self.id | |
1593 | ||
1594 | def clear_cache(self): | |
1595 | """ | |
1596 | Clear the stored data from the cache. | |
1597 | """ | |
1598 | self.cache.delete(self.cache_key) | |
1599 | ||
	@classmethod
	def create(cls, pakfire, build, arch, type="build"):
		"""
			Insert a new job of the given type for the given build and
			architecture into the database and return the Job object.
		"""
		id = pakfire.db.execute("INSERT INTO jobs(uuid, type, build_id, arch_id, time_created) \
			VALUES(%s, %s, %s, %s, NOW())", "%s" % uuid.uuid4(), type, build.id, arch.id)

		job = Job(pakfire, id)
		job.log("created")

		# Set cache for Build object.
		job._build = build

		# Jobs are by default in state "new" and wait for being checked
		# for dependencies. Packages that do have no build dependencies
		# can directly be forwarded to "pending" state.
		if not job.pkg.requires:
			job.state = "pending"

		return job
1618 | ||
	def delete(self):
		"""
			Remove this job and everything that belongs to it
			(buildroots, history, packages, log files) from the system.
		"""
		self.__delete_buildroots()
		self.__delete_history()
		self.__delete_packages()
		self.__delete_logfiles()

		# Delete the job itself.
		self.db.execute("DELETE FROM jobs WHERE id = %s", self.id)
		self.clear_cache()

	def __delete_buildroots(self):
		"""
			Removes all buildroots.
		"""
		self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id)

	def __delete_history(self):
		"""
			Removes all references in the history to this build job.
		"""
		self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id)

	def __delete_packages(self):
		"""
			Deletes all uploaded files from the job.
		"""
		for pkg in self.packages:
			pkg.delete()

		self.db.execute("DELETE FROM jobs_packages WHERE job_id = %s", self.id)

	def __delete_logfiles(self):
		# Log files are not removed directly; they are queued for
		# deletion on disk by a separate cleanup process.
		for logfile in self.logfiles:
			self.db.execute("INSERT INTO queue_delete(path) VALUES(%s)", logfile.path)
1653 | ||
	def reset(self, user=None):
		"""
			Throw away everything this job has produced (buildroots,
			packages, history, log files) and put it back into the
			"new" state. The reset is logged with the acting user.
		"""
		self.__delete_buildroots()
		self.__delete_packages()
		self.__delete_history()
		self.__delete_logfiles()

		self.state = "new"
		self.log("reset", user=user)
1662 | ||
	@property
	def data(self):
		"""
			The database row of this job. Fetched on first access and
			cached on the instance afterwards.
		"""
		if self._data is None:
			self._data = self.db.get("SELECT * FROM jobs WHERE id = %s", self.id)
			assert self._data

		return self._data
1670 | ||
1671 | ## Logging stuff | |
1672 | ||
1673 | def log(self, action, user=None, state=None, builder=None, test_job=None): | |
1674 | user_id = None | |
1675 | if user: | |
1676 | user_id = user.id | |
1677 | ||
1678 | builder_id = None | |
1679 | if builder: | |
1680 | builder_id = builder.id | |
1681 | ||
1682 | test_job_id = None | |
1683 | if test_job: | |
1684 | test_job_id = test_job.id | |
1685 | ||
1686 | self.db.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \ | |
1687 | time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)", | |
1688 | self.id, action, state, user_id, builder_id, test_job_id) | |
1689 | ||
1690 | def get_log(self, limit=None, offset=None, user=None): | |
1691 | query = "SELECT * FROM jobs_history" | |
1692 | ||
1693 | conditions = ["job_id = %s",] | |
1694 | args = [self.id,] | |
1695 | ||
1696 | if user: | |
1697 | conditions.append("user_id = %s") | |
1698 | args.append(user.id) | |
1699 | ||
1700 | if conditions: | |
1701 | query += " WHERE %s" % " AND ".join(conditions) | |
1702 | ||
1703 | query += " ORDER BY time DESC" | |
1704 | ||
1705 | if limit: | |
1706 | if offset: | |
1707 | query += " LIMIT %s,%s" | |
1708 | args += [offset, limit,] | |
1709 | else: | |
1710 | query += " LIMIT %s" | |
1711 | args += [limit,] | |
1712 | ||
1713 | entries = [] | |
1714 | for entry in self.db.query(query, *args): | |
1715 | entry = logs.JobLogEntry(self.pakfire, entry) | |
1716 | entries.append(entry) | |
1717 | ||
1718 | return entries | |
1719 | ||
	@property
	def uuid(self):
		"""The globally unique identifier of this job."""
		return self.data.uuid

	@property
	def type(self):
		"""The type of this job ("build" or "test")."""
		return self.data.type

	@property
	def build_id(self):
		"""The ID of the build this job belongs to."""
		return self.data.build_id

	@property
	def build(self):
		"""The Build object this job belongs to (fetched once, cached)."""
		if self._build is None:
			self._build = self.pakfire.builds.get_by_id(self.build_id)
			assert self._build

		return self._build
1739 | ||
1740 | @property | |
1741 | def related_jobs(self): | |
1742 | ret = [] | |
1743 | ||
1744 | for job in self.build.jobs: | |
1745 | if job == self: | |
1746 | continue | |
1747 | ||
1748 | ret.append(job) | |
1749 | ||
1750 | return ret | |
1751 | ||
1752 | @property | |
1753 | def pkg(self): | |
1754 | return self.build.pkg | |
1755 | ||
1756 | @property | |
1757 | def name(self): | |
1758 | return "%s-%s.%s" % (self.pkg.name, self.pkg.friendly_version, self.arch.name) | |
1759 | ||
eedc6432 MT |
1760 | @property |
1761 | def size(self): | |
1762 | return sum((p.size for p in self.packages)) | |
1763 | ||
f6e6ff79 MT |
	def get_state(self):
		# Current job state as stored in the database row.
		return self.data.state

	def set_state(self, state, user=None, log=True):
		"""
			Change the state of this job and trigger the side effects
			of the transition: timestamp updates, notifications,
			package cleanup and the state of the parent build.
		"""
		# Nothing to do if the state remains.
		if not self.state == state:
			self.db.execute("UPDATE jobs SET state = %s WHERE id = %s", state, self.id)
			self.clear_cache()

			# Log the event.
			if log and not state == "new":
				self.log("state_change", state=state, user=user)

			# Update cache.
			if self._data:
				self._data["state"] = state

		# Always clear the message when the status is changed.
		self.update_message(None)

		# Update some more informations.
		if state == "dispatching":
			# Set start time.
			self.db.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
				WHERE id = %s", self.id)

		elif state == "pending":
			# A new try begins: bump the counter and reset both timestamps.
			self.db.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
				time_finished = NULL WHERE id = %s", self.id)

		elif state in ("aborted", "dependency_error", "finished", "failed"):
			# Set finish time and reset builder..
			self.db.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self.id)

			# Send messages to the user.
			if state == "finished":
				self.send_finished_message()

			elif state == "failed":
				# Remove all package files if a job is set to failed state.
				self.__delete_packages()

				self.send_failed_message()

		# Automatically update the state of the build (not on test builds).
		if self.type == "build":
			self.build.auto_update_state()

	state = property(get_state, set_state)
1813 | ||
	@property
	def message(self):
		"""The current status message of this job."""
		return self.data.message

	def update_message(self, msg):
		"""Set the status message and keep the cached row in sync."""
		self.db.execute("UPDATE jobs SET message = %s WHERE id = %s",
			msg, self.id)
		self.clear_cache()

		if self._data:
			self._data["message"] = msg
1825 | ||
	@property
	def builder_id(self):
		"""The ID of the builder this job is assigned to, or None."""
		return self.data.builder_id

	def get_builder(self):
		"""Return the Builder this job is assigned to, or None."""
		if not self.builder_id:
			return

		# Resolve and cache the Builder object on first access.
		if self._builder is None:
			self._builder = builders.Builder(self.pakfire, self.builder_id)
			assert self._builder

		return self._builder

	def set_builder(self, builder, user=None):
		"""
			Assign this job to the given builder. If a user is given,
			the assignment is logged.
		"""
		self.db.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
			builder.id, self.id)

		# Update cache.
		if self._data:
			self._data["builder_id"] = builder.id
		self.clear_cache()

		self._builder = builder

		# Log the event.
		if user:
			self.log("builder_assigned", builder=builder, user=user)

	builder = property(get_builder, set_builder)
1856 | ||
	@property
	def arch_id(self):
		"""The ID of the architecture this job builds for."""
		return self.data.arch_id

	@property
	def arch(self):
		"""The architecture object this job builds for."""
		return self.pakfire.arches.get_by_id(self.arch_id)
1864 | ||
	@property
	def duration(self):
		"""
			The runtime of this job in seconds: 0 if it never started,
			start-to-finish if done, start-to-now if still running.
		"""
		if not self.time_started:
			return 0

		if self.time_finished:
			delta = self.time_finished - self.time_started
		else:
			# NOTE(review): this mixes the database server's clock
			# (time_started is set with NOW()) with this host's UTC clock;
			# the value is only accurate if both agree - confirm.
			delta = datetime.datetime.utcnow() - self.time_started

		return delta.total_seconds()
1876 | ||
	@property
	def time_created(self):
		"""When this job was created (set by Job.create())."""
		return self.data.time_created

	@property
	def time_started(self):
		"""When this job was last dispatched, or None."""
		return self.data.time_started

	@property
	def time_finished(self):
		"""When this job last finished/failed/was aborted, or None."""
		return self.data.time_finished

	@property
	def tries(self):
		"""How often this job has been dispatched to a builder."""
		return self.data.tries
1892 | ||
1893 | @property | |
1894 | def packages(self): | |
1895 | if self._packages is None: | |
1896 | self._packages = [] | |
1897 | ||
1898 | query = "SELECT pkg_id AS id FROM jobs_packages \ | |
1899 | JOIN packages ON packages.id = jobs_packages.pkg_id \ | |
1900 | WHERE jobs_packages.job_id = %s ORDER BY packages.name" | |
1901 | ||
1902 | for pkg in self.db.query(query, self.id): | |
1903 | pkg = packages.Package(self.pakfire, pkg.id) | |
1904 | pkg._job = self | |
1905 | ||
1906 | self._packages.append(pkg) | |
1907 | ||
1908 | return self._packages | |
1909 | ||
1910 | def get_pkg_by_uuid(self, uuid): | |
1911 | pkg = self.db.get("SELECT packages.id FROM packages \ | |
1912 | JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \ | |
1913 | WHERE jobs_packages.job_id = %s AND packages.uuid = %s", | |
1914 | self.id, uuid) | |
1915 | ||
1916 | if not pkg: | |
1917 | return | |
1918 | ||
1919 | pkg = packages.Package(self.pakfire, pkg.id) | |
1920 | pkg._job = self | |
1921 | ||
1922 | return pkg | |
1923 | ||
1924 | @property | |
1925 | def logfiles(self): | |
1926 | if self._logfiles is None: | |
1927 | self._logfiles = [] | |
1928 | ||
1929 | for log in self.db.query("SELECT id FROM logfiles WHERE job_id = %s", self.id): | |
1930 | log = logs.LogFile(self.pakfire, log.id) | |
1931 | log._job = self | |
1932 | ||
1933 | self._logfiles.append(log) | |
1934 | ||
1935 | return self._logfiles | |
1936 | ||
	def add_file(self, filename):
		"""
		Add the specified file to this job.

		Log files (*.log) are attached as logs; package files are
		imported as packages. Test jobs must not upload packages, so
		those are silently dropped.

		The file is copied to the right directory by this function.
		"""
		assert os.path.exists(filename)

		if filename.endswith(".log"):
			self._add_file_log(filename)

		elif filename.endswith(".%s" % PACKAGE_EXTENSION):
			# It is not allowed to upload packages on test builds.
			if self.type == "test":
				return

			self._add_file_package(filename)
1954 | ||
1955 | def _add_file_log(self, filename): | |
1956 | """ | |
1957 | Attach a log file to this job. | |
1958 | """ | |
1959 | target_dirname = os.path.join(self.build.path, "logs") | |
1960 | ||
1961 | if self.type == "test": | |
1962 | i = 1 | |
1963 | while True: | |
1964 | target_filename = os.path.join(target_dirname, | |
1965 | "test.%s.%s.%s.log" % (self.arch.name, i, self.tries)) | |
1966 | ||
1967 | if os.path.exists(target_filename): | |
1968 | i += 1 | |
1969 | else: | |
1970 | break | |
1971 | else: | |
1972 | target_filename = os.path.join(target_dirname, | |
1973 | "build.%s.%s.log" % (self.arch.name, self.tries)) | |
1974 | ||
1975 | # Make sure the target directory exists. | |
1976 | if not os.path.exists(target_dirname): | |
1977 | os.makedirs(target_dirname) | |
1978 | ||
1979 | # Calculate a SHA512 hash from that file. | |
1980 | f = open(filename, "rb") | |
1981 | h = hashlib.sha512() | |
1982 | while True: | |
1983 | buf = f.read(BUFFER_SIZE) | |
1984 | if not buf: | |
1985 | break | |
1986 | ||
1987 | h.update(buf) | |
1988 | f.close() | |
1989 | ||
1990 | # Copy the file to the final location. | |
1991 | shutil.copy2(filename, target_filename) | |
1992 | ||
1993 | # Create an entry in the database. | |
1994 | self.db.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \ | |
1995 | VALUES(%s, %s, %s, %s)", self.id, os.path.relpath(target_filename, PACKAGES_DIR), | |
1996 | os.path.getsize(target_filename), h.hexdigest()) | |
1997 | ||
1998 | def _add_file_package(self, filename): | |
1999 | # Open package (creates entry in the database). | |
2000 | pkg = packages.Package.open(self.pakfire, filename) | |
2001 | ||
2002 | # Move package to the build directory. | |
2003 | pkg.move(os.path.join(self.build.path, self.arch.name)) | |
2004 | ||
2005 | # Attach the package to this job. | |
2006 | self.db.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)", | |
2007 | self.id, pkg.id) | |
2008 | ||
2009 | def get_aborted_state(self): | |
2010 | return self.data.aborted_state | |
2011 | ||
2012 | def set_aborted_state(self, state): | |
2013 | self.db.execute("UPDATE jobs SET aborted_state = %s WHERE id = %s", | |
2014 | state, self.id) | |
2015 | self.clear_cache() | |
2016 | ||
2017 | if self._data: | |
2018 | self._data["aborted_state"] = state | |
2019 | ||
2020 | aborted_state = property(get_aborted_state, set_aborted_state) | |
2021 | ||
2022 | @property | |
2023 | def message_recipients(self): | |
2024 | l = [] | |
2025 | ||
2026 | # Add all people watching the build. | |
2027 | l += self.build.message_recipients | |
2028 | ||
2029 | # Add the package maintainer on release builds. | |
2030 | if self.build.type == "release": | |
2031 | maint = self.pkg.maintainer | |
2032 | ||
2033 | if isinstance(maint, users.User): | |
2034 | l.append("%s <%s>" % (maint.realname, maint.email)) | |
2035 | elif maint: | |
2036 | l.append(maint) | |
2037 | ||
2038 | # XXX add committer and commit author. | |
2039 | ||
2040 | # Add the owner of the scratch build on scratch builds. | |
2041 | elif self.build.type == "scratch" and self.build.user: | |
2042 | l.append("%s <%s>" % \ | |
2043 | (self.build.user.realname, self.build.user.email)) | |
2044 | ||
2045 | return set(l) | |
2046 | ||
2047 | def save_buildroot(self, pkgs): | |
2048 | rows = [] | |
2049 | ||
2050 | for pkg_name, pkg_uuid in pkgs: | |
2051 | rows.append((self.id, self.tries, pkg_uuid, pkg_name)) | |
2052 | ||
2053 | # Cleanup old stuff first (for rebuilding packages). | |
2054 | self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s", | |
2055 | self.id, self.tries) | |
2056 | ||
2057 | self.db.executemany("INSERT INTO \ | |
2058 | jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \ | |
2059 | VALUES(%s, %s, %s, %s)", rows) | |
2060 | ||
2061 | def has_buildroot(self, tries=None): | |
2062 | if tries is None: | |
2063 | tries = self.tries | |
2064 | ||
2065 | res = self.db.get("SELECT COUNT(*) AS num FROM jobs_buildroots \ | |
2066 | WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \ | |
2067 | ORDER BY pkg_name", self.id, tries) | |
2068 | ||
2069 | if res: | |
2070 | return res.num | |
2071 | ||
2072 | return 0 | |
2073 | ||
2074 | def get_buildroot(self, tries=None): | |
2075 | if tries is None: | |
2076 | tries = self.tries | |
2077 | ||
2078 | rows = self.db.query("SELECT * FROM jobs_buildroots \ | |
2079 | WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \ | |
2080 | ORDER BY pkg_name", self.id, tries) | |
2081 | ||
2082 | pkgs = [] | |
2083 | for row in rows: | |
2084 | # Search for this package in the packages table. | |
2085 | pkg = self.pakfire.packages.get_by_uuid(row.pkg_uuid) | |
2086 | pkgs.append((row.pkg_name, row.pkg_uuid, pkg)) | |
2087 | ||
2088 | return pkgs | |
2089 | ||
2090 | def send_finished_message(self): | |
2091 | # Send no finished mails for test jobs. | |
2092 | if self.type == "test": | |
2093 | return | |
2094 | ||
2095 | logging.debug("Sending finished message for job %s to %s" % \ | |
2096 | (self.name, ", ".join(self.message_recipients))) | |
2097 | ||
2098 | info = { | |
2099 | "build_name" : self.name, | |
2100 | "build_host" : self.builder.name, | |
2101 | "build_uuid" : self.uuid, | |
2102 | } | |
2103 | ||
2104 | self.pakfire.messages.send_to_all(self.message_recipients, | |
2105 | MSG_BUILD_FINISHED_SUBJECT, MSG_BUILD_FINISHED, info) | |
2106 | ||
2107 | def send_failed_message(self): | |
2108 | logging.debug("Sending failed message for job %s to %s" % \ | |
2109 | (self.name, ", ".join(self.message_recipients))) | |
2110 | ||
2111 | build_host = "--" | |
2112 | if self.builder: | |
2113 | build_host = self.builder.name | |
2114 | ||
2115 | info = { | |
2116 | "build_name" : self.name, | |
2117 | "build_host" : build_host, | |
2118 | "build_uuid" : self.uuid, | |
2119 | } | |
2120 | ||
2121 | self.pakfire.messages.send_to_all(self.message_recipients, | |
2122 | MSG_BUILD_FAILED_SUBJECT, MSG_BUILD_FAILED, info) | |
2123 | ||
2124 | def set_start_time(self, start_time): | |
2125 | if start_time is None: | |
2126 | return | |
2127 | ||
2128 | self.db.execute("UPDATE jobs SET start_not_before = NOW() + %s \ | |
2129 | WHERE id = %s LIMIT 1", start_time, self.id) | |
2130 | ||
2131 | def schedule(self, type, start_time=None, user=None): | |
2132 | assert type in ("rebuild", "test") | |
2133 | ||
2134 | if type == "rebuild": | |
2135 | if self.state == "finished": | |
2136 | return | |
2137 | ||
2138 | self.set_state("new", user=user, log=False) | |
2139 | self.set_start_time(start_time) | |
2140 | ||
2141 | # Log the event. | |
2142 | self.log("schedule_rebuild", user=user) | |
2143 | ||
2144 | elif type == "test": | |
2145 | if not self.state == "finished": | |
2146 | return | |
2147 | ||
2148 | # Create a new job with same build and arch. | |
2149 | job = self.create(self.pakfire, self.build, self.arch, type="test") | |
2150 | job.set_start_time(start_time) | |
2151 | ||
2152 | # Log the event. | |
2153 | self.log("schedule_test_job", test_job=job, user=user) | |
2154 | ||
2155 | return job | |
2156 | ||
2157 | def schedule_test(self, start_not_before=None, user=None): | |
2158 | # XXX to be removed | |
2159 | return self.schedule("test", start_time=start_not_before, user=user) | |
2160 | ||
2161 | def schedule_rebuild(self, start_not_before=None, user=None): | |
2162 | # XXX to be removed | |
2163 | return self.schedule("rebuild", start_time=start_not_before, user=user) | |
2164 | ||
2165 | def get_build_repos(self): | |
2166 | """ | |
2167 | Returns a list of all repositories that should be used when | |
2168 | building this job. | |
2169 | """ | |
2170 | repo_ids = self.db.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s", | |
2171 | self.id) | |
2172 | ||
2173 | if not repo_ids: | |
2174 | return self.distro.get_build_repos() | |
2175 | ||
2176 | repos = [] | |
2177 | for repo in self.distro.repositories: | |
2178 | if repo.id in [r.id for r in repo_ids]: | |
2179 | repos.append(repo) | |
2180 | ||
2181 | return repos or self.distro.get_build_repos() | |
2182 | ||
2183 | def get_repo_config(self): | |
2184 | """ | |
2185 | Get repository configuration file that is sent to the builder. | |
2186 | """ | |
2187 | confs = [] | |
2188 | ||
2189 | for repo in self.get_build_repos(): | |
2190 | confs.append(repo.get_conf()) | |
2191 | ||
2192 | return "\n\n".join(confs) | |
2193 | ||
2194 | def get_config(self): | |
2195 | """ | |
2196 | Get configuration file that is sent to the builder. | |
2197 | """ | |
2198 | confs = [] | |
2199 | ||
2200 | # Add the distribution configuration. | |
2201 | confs.append(self.distro.get_config()) | |
2202 | ||
2203 | # Then add all repositories for this build. | |
2204 | confs.append(self.get_repo_config()) | |
2205 | ||
2206 | return "\n\n".join(confs) | |
2207 | ||
2208 | def used_by(self): | |
2209 | if not self.packages: | |
2210 | return [] | |
2211 | ||
2212 | conditions = [] | |
2213 | args = [] | |
2214 | ||
2215 | for pkg in self.packages: | |
2216 | conditions.append(" pkg_uuid = %s") | |
2217 | args.append(pkg.uuid) | |
2218 | ||
2219 | query = "SELECT DISTINCT job_id AS id FROM jobs_buildroots" | |
2220 | query += " WHERE %s" % " OR ".join(conditions) | |
2221 | ||
2222 | job_ids = self.db.query(query, *args) | |
2223 | ||
2224 | print job_ids | |
2225 | ||
	def resolvdep(self):
		"""
		Try to resolve the build dependencies of this job's source
		package and update the job state accordingly.

		On a dependency error the job goes to "dependency_error" with
		the solver's problem string as message; on success the job is
		set to "pending" (unless it is currently "failed").
		"""
		# Build the pakfire configuration from the general config file
		# plus this job's distro/repository configuration.
		config = pakfire.config.Config(files=["general.conf"])
		config.parse(self.get_config())

		# The filename of the source file.
		filename = os.path.join(PACKAGES_DIR, self.build.pkg.path)
		assert os.path.exists(filename), filename

		# Create a new pakfire instance with the configuration for
		# this build.
		p = pakfire.PakfireServer(config=config, arch=self.arch.name)

		# Try to solve the build dependencies.
		try:
			solver = p.resolvdep(filename)

		# Catch dependency errors and log the problem string.
		# NOTE(review): DependencyError presumably comes in via the
		# wildcard "from constants import *" or pakfire — confirm.
		except DependencyError, e:
			self.state = "dependency_error"
			self.update_message(e)

		else:
			# If the build dependencies can be resolved, we set the build in
			# pending state.
			if solver.status is True:
				# Do not resurrect a job that has already failed.
				if self.state in ("failed",):
					return

				self.state = "pending"