]>
Commit | Line | Data |
---|---|---|
f6e6ff79 MT |
1 | #!/usr/bin/python |
2 | ||
3 | import datetime | |
4 | import hashlib | |
5 | import logging | |
6 | import os | |
7 | import re | |
8 | import shutil | |
9 | import uuid | |
10 | ||
11 | import pakfire | |
12 | import pakfire.config | |
13 | import pakfire.packages | |
14 | ||
15 | import base | |
16 | import builders | |
17 | import logs | |
18 | import packages | |
19 | import repository | |
20 | import updates | |
21 | import users | |
22 | ||
23 | from constants import * | |
24 | ||
def import_from_package(_pakfire, filename, distro=None, commit=None, type="release",
	arches=None, check_for_duplicates=True, owner=None):
	"""
	Import a package file into the build service and create a build for it.

	_pakfire: the backend connection object.
	filename: path of the package file on disk.
	distro:   target distribution; defaults to the distro of the commit's source.
	commit:   the source commit this package came from (optional).
	type:     build type, "release" by default.
	arches:   restrict the automatically created jobs to these architectures.
	check_for_duplicates: skip the import if the distro already has a package
	          with the same name/epoch/version/release.
	owner:    user who owns the resulting build.

	Returns a (package, build) tuple, or None when a duplicate was skipped.
	"""

	if distro is None:
		distro = commit.source.distro

	assert distro

	# Open the package file to read some basic information.
	pkg = pakfire.packages.open(None, None, filename)

	if check_for_duplicates:
		if distro.has_package(pkg.name, pkg.epoch, pkg.version, pkg.release):
			logging.warning("Duplicate package detected: %s. Skipping." % pkg)
			return

	# Open the package and add it to the database.
	pkg = packages.Package.open(_pakfire, filename)
	logging.debug("Created new package: %s" % pkg)

	# Associate the package to the processed commit.
	if commit:
		pkg.commit = commit

	# Create a new build object from the package which
	# is always a release build.
	build = Build.create(_pakfire, pkg, type=type, owner=owner, distro=distro)
	logging.debug("Created new build job: %s" % build)

	# Create all automatic jobs.
	build.create_autojobs(arches=arches)

	return pkg, build
58 | ||
59 | ||
60 | class Builds(base.Object): | |
eedc6432 MT |
	def get_by_id(self, id, data=None):
		# Construct a Build object directly; "data" may carry a prefetched
		# database row so the object does not query the database again.
		return Build(self.pakfire, id, data=data)
f6e6ff79 MT |
63 | |
64 | def get_by_uuid(self, uuid): | |
65 | build = self.db.get("SELECT id FROM builds WHERE uuid = %s LIMIT 1", uuid) | |
66 | ||
67 | if build: | |
68 | return self.get_by_id(build.id) | |
69 | ||
70 | def get_all(self, limit=50): | |
eedc6432 | 71 | query = "SELECT * FROM builds ORDER BY time_created DESC" |
f6e6ff79 MT |
72 | |
73 | if limit: | |
74 | query += " LIMIT %d" % limit | |
75 | ||
eedc6432 | 76 | return [self.get_by_id(b.id, b) for b in self.db.query(query)] |
f6e6ff79 | 77 | |
eedc6432 | 78 | def get_by_user(self, user, type=None, public=None): |
f6e6ff79 MT |
79 | args = [] |
80 | conditions = [] | |
81 | ||
82 | if not type or type == "scratch": | |
83 | # On scratch builds the user id equals the owner id. | |
84 | conditions.append("(builds.type = 'scratch' AND owner_id = %s)") | |
85 | args.append(user.id) | |
86 | ||
87 | elif not type or type == "release": | |
88 | pass # TODO | |
89 | ||
90 | if public is True: | |
91 | conditions.append("public = 'Y'") | |
92 | elif public is False: | |
93 | conditions.append("public = 'N'") | |
94 | ||
eedc6432 | 95 | query = "SELECT builds.* AS id FROM builds \ |
f6e6ff79 MT |
96 | JOIN packages ON builds.pkg_id = packages.id" |
97 | ||
98 | if conditions: | |
99 | query += " WHERE %s" % " AND ".join(conditions) | |
100 | ||
eedc6432 | 101 | query += " ORDER BY builds.time_created DESC" |
f6e6ff79 | 102 | |
eedc6432 | 103 | builds = [] |
f6e6ff79 | 104 | for build in self.db.query(query, *args): |
eedc6432 MT |
105 | build = Build(self.pakfire, build.id, build) |
106 | builds.append(build) | |
107 | ||
108 | return builds | |
f6e6ff79 | 109 | |
a15d6139 | 110 | def get_by_name(self, name, type=None, public=None, user=None, limit=None, offset=None): |
f6e6ff79 MT |
111 | args = [name,] |
112 | conditions = [ | |
113 | "packages.name = %s", | |
114 | ] | |
115 | ||
116 | if type: | |
117 | conditions.append("builds.type = %s") | |
118 | args.append(type) | |
119 | ||
120 | or_conditions = [] | |
121 | if public is True: | |
122 | or_conditions.append("public = 'Y'") | |
123 | elif public is False: | |
124 | or_conditions.append("public = 'N'") | |
125 | ||
126 | if user and not user.is_admin(): | |
127 | or_conditions.append("builds.owner_id = %s") | |
128 | args.append(user.id) | |
129 | ||
a15d6139 | 130 | query = "SELECT builds.* AS id FROM builds \ |
f6e6ff79 MT |
131 | JOIN packages ON builds.pkg_id = packages.id" |
132 | ||
133 | if or_conditions: | |
134 | conditions.append(" OR ".join(or_conditions)) | |
135 | ||
136 | if conditions: | |
137 | query += " WHERE %s" % " AND ".join(conditions) | |
138 | ||
a15d6139 MT |
139 | if type == "release": |
140 | query += " ORDER BY packages.name,packages.epoch,packages.version,packages.release,id ASC" | |
141 | elif type == "scratch": | |
142 | query += " ORDER BY time_created DESC" | |
f6e6ff79 | 143 | |
a15d6139 MT |
144 | if limit: |
145 | if offset: | |
146 | query += " LIMIT %s,%s" | |
147 | args.extend([offset, limit]) | |
148 | else: | |
149 | query += " LIMIT %s" | |
150 | args.append(limit) | |
151 | ||
152 | return [Build(self.pakfire, b.id, b) for b in self.db.query(query, *args)] | |
f6e6ff79 MT |
153 | |
	def get_latest_by_name(self, name, type=None, public=None):
		"""
		Return the latest build of the package with the given name.

		Only builds referenced in builds_latest are considered.  Release
		builds are preferred over scratch builds; among those, the most
		recently created build wins.  Returns None if nothing matches.
		"""
		query = "\
			SELECT * FROM builds \
				LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \
			WHERE builds_latest.package_name = %s"
		args = [name,]

		if type:
			query += " AND builds_latest.build_type = %s"
			args.append(type)

		if public is True:
			query += " AND builds.public = %s"
			args.append("Y")
		elif public is False:
			query += " AND builds.public = %s"
			args.append("N")

		# Get the last one only.
		# Prefer release builds over scratch builds.
		query += "\
			ORDER BY \
				CASE builds.type WHEN 'release' THEN 0 ELSE 1 END, \
				builds.time_created DESC \
			LIMIT 1"

		res = self.db.get(query, *args)

		if res:
			return Build(self.pakfire, res.id, res)
f6e6ff79 | 184 | |
fd0e70ec MT |
185 | def get_active_builds(self, name, public=None): |
186 | query = "\ | |
aff0187d MT |
187 | SELECT * FROM builds \ |
188 | LEFT JOIN builds_latest ON builds.id = builds_latest.build_id \ | |
2f83864f MT |
189 | WHERE builds_latest.package_name = %s AND builds.type = %s" |
190 | args = [name, "release"] | |
fd0e70ec MT |
191 | |
192 | if public is True: | |
193 | query += " AND builds.public = %s" | |
194 | args.append("Y") | |
195 | elif public is False: | |
196 | query += " AND builds.public = %s" | |
197 | args.append("N") | |
198 | ||
fd0e70ec MT |
199 | builds = [] |
200 | for row in self.db.query(query, *args): | |
201 | b = Build(self.pakfire, row.id, row) | |
202 | builds.append(b) | |
203 | ||
204 | # Sort the result. Lastest build first. | |
205 | builds.sort(reverse=True) | |
206 | ||
207 | return builds | |
208 | ||
f6e6ff79 | 209 | def count(self): |
966498de MT |
210 | builds = self.db.get("SELECT COUNT(*) AS count FROM builds") |
211 | if builds: | |
212 | return builds.count | |
f6e6ff79 MT |
213 | |
	def needs_test(self, threshold, arch, limit=None, randomize=False):
		"""
		Return builds that are due for a test rebuild on the given arch.

		A build qualifies when its last successful 'build' job on that
		arch finished before "threshold" and it has no unfinished or
		newer job on that arch.  Only release builds in the stable or
		testing state are considered.

		limit:     cap the number of returned builds.
		randomize: return the candidates in random order.
		"""
		query = "SELECT id FROM builds \
			WHERE NOT EXISTS \
				(SELECT * FROM jobs WHERE \
					jobs.build_id = builds.id AND \
					jobs.arch_id = %s AND \
					(jobs.state != 'finished' OR \
					jobs.time_finished >= %s) \
				) \
			AND EXISTS \
				(SELECT * FROM jobs WHERE \
					jobs.build_id = builds.id AND \
					jobs.arch_id = %s AND \
					jobs.type = 'build' AND \
					jobs.state = 'finished' AND \
					jobs.time_finished < %s \
				) \
			AND builds.type = 'release' \
			AND (builds.state = 'stable' OR builds.state = 'testing')"
		args = [arch.id, threshold, arch.id, threshold]

		if randomize:
			query += " ORDER BY RAND()"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		return [Build(self.pakfire, b.id) for b in self.db.query(query, *args)]
243 | ||
244 | def get_obsolete(self, repo=None): | |
245 | """ | |
246 | Get all obsoleted builds. | |
247 | ||
248 | If repo is True: which are in any repository. | |
249 | If repo is some Repository object: which are in this repository. | |
250 | """ | |
251 | args = [] | |
252 | ||
253 | if repo is None: | |
254 | query = "SELECT id FROM builds WHERE state = 'obsolete'" | |
255 | ||
256 | else: | |
257 | query = "SELECT build_id AS id FROM repositories_builds \ | |
258 | JOIN builds ON builds.id = repositories_builds.build_id \ | |
259 | WHERE builds.state = 'obsolete'" | |
260 | ||
261 | if repo and not repo is True: | |
262 | query += " AND repositories_builds.repo_id = %s" | |
263 | args.append(repo.id) | |
264 | ||
265 | res = self.db.query(query, *args) | |
266 | ||
267 | builds = [] | |
268 | for build in res: | |
269 | build = Build(self.pakfire, build.id) | |
270 | builds.append(build) | |
271 | ||
272 | return builds | |
273 | ||
4b1e87c4 MT |
	def get_changelog(self, name, public=None, limit=5, offset=0):
		"""
		Return the changelog of the package with the given name, i.e. its
		release builds, newest first.

		public: filter by the public flag if True/False, no filter if None.
		limit/offset: paginate the result.
		"""
		query = "SELECT builds.* FROM builds \
			JOIN packages ON builds.pkg_id = packages.id \
			WHERE \
				builds.type = %s \
			AND \
				packages.name = %s"
		args = ["release", name,]

		if public == True:
			query += " AND builds.public = %s"
			args.append("Y")
		elif public == False:
			query += " AND builds.public = %s"
			args.append("N")

		query += " ORDER BY builds.time_created DESC"

		if limit:
			if offset:
				query += " LIMIT %s,%s"
				args += [offset, limit]
			else:
				query += " LIMIT %s"
				args.append(limit)

		builds = []
		for b in self.db.query(query, *args):
			b = Build(self.pakfire, b.id, b)
			builds.append(b)

		# Sort by package (see Build.__cmp__), newest first.
		builds.sort(reverse=True)

		return builds
308 | ||
62c7e7cd MT |
	def get_comments(self, limit=10, offset=None, user=None):
		"""
		Return the most recent build comments as CommentLogEntry objects.

		user: only comments written by this user.
		limit/offset: paginate the result.
		"""
		query = "SELECT * FROM builds_comments \
			JOIN users ON builds_comments.user_id = users.id"
		args = []

		wheres = []
		if user:
			wheres.append("users.id = %s")
			args.append(user.id)

		if wheres:
			query += " WHERE %s" % " AND ".join(wheres)

		# Sort everything.
		query += " ORDER BY time_created DESC"

		# Limits.  MySQL syntax is "LIMIT offset, count", so the offset
		# argument must be appended before the limit.
		if limit:
			if offset:
				query += " LIMIT %s,%s"
				args.append(offset)
			else:
				query += " LIMIT %s"

			args.append(limit)

		comments = []
		for comment in self.db.query(query, *args):
			comment = logs.CommentLogEntry(self.pakfire, comment)
			comments.append(comment)

		return comments
341 | ||
	def get_build_times_summary(self, name=None, job_type=None, arch=None):
		"""
		Return per-architecture statistics (min/max/avg/sum/stddev of the
		job duration) from the builds_times table.

		name:     only jobs of the package with this name.
		job_type: only jobs of this type.
		arch:     only jobs on this architecture.
		"""
		query = "\
			SELECT \
				builds_times.arch AS arch, \
				MAX(duration) AS maximum, \
				MIN(duration) AS minimum, \
				AVG(duration) AS average, \
				SUM(duration) AS sum, \
				STDDEV_POP(duration) AS stddev \
			FROM builds_times \
				LEFT JOIN builds ON builds_times.build_id = builds.id \
				LEFT JOIN packages ON builds.pkg_id = packages.id"

		args = []
		conditions = []

		# Filter for name.
		if name:
			conditions.append("packages.name = %s")
			args.append(name)

		# Filter by job types.
		if job_type:
			conditions.append("builds_times.job_type = %s")
			args.append(job_type)

		# Filter by arch.
		if arch:
			conditions.append("builds_times.arch = %s")
			args.append(arch)

		# Add conditions.
		if conditions:
			query += " WHERE %s" % " AND ".join(conditions)

		# Grouping and sorting.
		query += " GROUP BY arch ORDER BY arch DESC"

		return self.db.query(query, *args)
381 | ||
a90bd9b0 MT |
382 | def get_build_times_by_arch(self, arch, **kwargs): |
383 | kwargs.update({ | |
384 | "arch" : arch, | |
385 | }) | |
386 | ||
387 | build_times = self.get_build_times_summary(**kwargs) | |
388 | if build_times: | |
389 | return build_times[0] | |
390 | ||
6efed544 MT |
391 | def get_types_stats(self): |
392 | res = self.db.query("SELECT type, COUNT(*) AS count FROM builds GROUP BY type") | |
393 | ||
394 | if not res: | |
395 | return {} | |
396 | ||
397 | ret = {} | |
398 | for row in res: | |
399 | ret[row.type] = row.count | |
400 | ||
401 | return ret | |
402 | ||
f6e6ff79 MT |
403 | |
404 | class Build(base.Object): | |
	def __init__(self, pakfire, id, data=None):
		base.Object.__init__(self, pakfire)

		# ID of this build
		self.id = id

		# Cache data.
		# "data" may be a prefetched database row; the remaining
		# attributes are lazily-filled caches for the properties below.
		self._data = data
		self._jobs = None
		self._jobs_test = None
		self._depends_on = None
		self._pkg = None
		self._credits = None
		self._owner = None
		self._update = None
		self._repo = None
		self._distro = None

	def __repr__(self):
		return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.pkg)

	def __cmp__(self, other):
		# Builds are ordered by their source packages.
		assert self.pkg
		assert other.pkg

		return cmp(self.pkg, other.pkg)
431 | ||
f6e6ff79 MT |
	@classmethod
	def create(cls, pakfire, pkg, type="release", owner=None, distro=None, public=True):
		"""
		Create a new build for the given source package and return the
		Build object.

		type:   "release", "scratch" or "test".
		owner:  owning user; mandatory for scratch builds.
		distro: the distribution this build belongs to (mandatory).
		public: whether the build is publicly visible.
		"""
		assert type in ("release", "scratch", "test")
		assert distro, "You need to specify the distribution of this build."

		# The database stores the public flag as 'Y'/'N'.
		if public:
			public = "Y"
		else:
			public = "N"

		# Check if scratch build has an owner.
		if type == "scratch" and not owner:
			raise Exception, "Scratch builds require an owner"

		# Set the default priority of this build.
		if type == "release":
			priority = 0

		elif type == "scratch":
			priority = 1

		elif type == "test":
			priority = -1

		id = pakfire.db.execute("""
			INSERT INTO builds(uuid, pkg_id, type, distro_id, time_created, public, priority)
			VALUES(%s, %s, %s, %s, NOW(), %s, %s)""", "%s" % uuid.uuid4(), pkg.id,
			type, distro.id, public, priority)

		# Set the owner of this buildgroup.
		if owner:
			pakfire.db.execute("UPDATE builds SET owner_id = %s WHERE id = %s",
				owner.id, id)

		build = cls(pakfire, id)

		# Log that the build has been created.
		build.log("created", user=owner)

		# Create directory where the files live.
		if not os.path.exists(build.path):
			os.makedirs(build.path)

		# Move package file to the directory of the build.
		source_path = os.path.join(build.path, "src")
		build.pkg.move(source_path)

		# Generate an update id.
		build.generate_update_id()

		# Obsolete all other builds with the same name to track updates.
		build.obsolete_others()

		# Search for possible bug IDs in the commit message.
		build.search_for_bugs()

		return build
489 | ||
	def delete(self):
		"""
		Deletes this build including all jobs, packages and the source
		package.
		"""
		# If the build is in a repository, we need to remove it.
		if self.repo:
			self.repo.rem_build(self)

		# Delete all build and test jobs first.
		for job in self.jobs + self.test_jobs:
			job.delete()

		if self.pkg:
			self.pkg.delete()

		# Delete everything related to this build.
		self.__delete_bugs()
		self.__delete_comments()
		self.__delete_history()
		self.__delete_watchers()

		# Delete the build itself.
		self.db.execute("DELETE FROM builds WHERE id = %s", self.id)

	def __delete_bugs(self):
		"""
		Delete all associated bugs.
		"""
		self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s", self.id)

	def __delete_comments(self):
		"""
		Delete all comments.
		"""
		self.db.execute("DELETE FROM builds_comments WHERE build_id = %s", self.id)

	def __delete_history(self):
		"""
		Delete the repository history.
		"""
		self.db.execute("DELETE FROM repositories_history WHERE build_id = %s", self.id)

	def __delete_watchers(self):
		"""
		Delete all watchers.
		"""
		self.db.execute("DELETE FROM builds_watchers WHERE build_id = %s", self.id)
537 | ||
	def reset(self):
		"""
		Resets the whole build so it can start again (as it has never
		been started).
		"""
		for job in self.jobs:
			job.reset()

		#self.__delete_bugs()
		self.__delete_comments()
		self.__delete_history()
		self.__delete_watchers()

		# NOTE(review): "state" is a read-only property on this class
		# (no setter defined below), so this assignment likely raises
		# AttributeError at runtime -- verify before relying on reset().
		self.state = "building"

		# XXX empty log
554 | ||
	@property
	def data(self):
		"""
		Lazy fetching of data for this object.

		Loads the builds row from the database on first access unless a
		prefetched row was passed to the constructor.
		"""
		if self._data is None:
			self._data = self.db.get("SELECT * FROM builds WHERE id = %s", self.id)
			assert self._data

		return self._data
565 | ||
	@property
	def info(self):
		"""
		A set of information that is sent to the XMLRPC client.
		"""
		return { "uuid" : self.uuid }

	def log(self, action, user=None, bug_id=None):
		# Append an entry to the history of this build.
		user_id = None
		if user:
			user_id = user.id

		self.db.execute("INSERT INTO builds_history(build_id, action, user_id, time, bug_id) \
			VALUES(%s, %s, %s, NOW(), %s)", self.id, action, user_id, bug_id)
580 | ||
	@property
	def uuid(self):
		"""
		The UUID of this build.
		"""
		return self.data.uuid

	@property
	def pkg(self):
		"""
		Get package that is to be built in the build.
		"""
		if self._pkg is None:
			self._pkg = packages.Package(self.pakfire, self.data.pkg_id)

		return self._pkg

	@property
	def name(self):
		# Display name: "<pkgname>-<friendly version>".
		return "%s-%s" % (self.pkg.name, self.pkg.friendly_version)

	@property
	def type(self):
		"""
		The type of this build ("release", "scratch" or "test").
		"""
		return self.data.type

	@property
	def owner_id(self):
		"""
		The ID of the owner of this build.
		"""
		return self.data.owner_id

	@property
	def owner(self):
		"""
		The owner of this build (None if the build has no owner).
		"""
		if not self.owner_id:
			return

		# Fetched lazily and cached.
		if self._owner is None:
			self._owner = self.pakfire.users.get_by_id(self.owner_id)
			assert self._owner

		return self._owner

	@property
	def distro_id(self):
		# ID of the distribution this build belongs to.
		return self.data.distro_id

	@property
	def distro(self):
		# The distribution object, fetched lazily and cached.
		if self._distro is None:
			self._distro = self.pakfire.distros.get_by_id(self.distro_id)
			assert self._distro

		return self._distro

	@property
	def user(self):
		# The owner, but only for scratch builds.
		if self.type == "scratch":
			return self.owner
646 | ||
	def get_depends_on(self):
		# The build this build depends on (lazily cached), or None.
		if self.data.depends_on and self._depends_on is None:
			self._depends_on = Build(self.pakfire, self.data.depends_on)

		return self._depends_on

	def set_depends_on(self, build):
		self.db.execute("UPDATE builds SET depends_on = %s WHERE id = %s",
			build.id, self.id)

		# Update cache.
		self._depends_on = build
		self._data["depends_on"] = build.id

	depends_on = property(get_depends_on, set_depends_on)
662 | ||
	@property
	def created(self):
		# Timestamp of when this build was created.
		return self.data.time_created

	@property
	def date(self):
		# Creation date (without time of day).
		return self.created.date()

	@property
	def public(self):
		"""
		Is this build public?
		"""
		return self.data.public == "Y"

	@property
	def size(self):
		"""
		Returns the size on disk of this build.
		"""
		s = 0

		# Add the source package.
		if self.pkg:
			s += self.pkg.size

		# Add all jobs.
		s += sum((j.size for j in self.jobs))

		return s
693 | ||
f6e6ff79 MT |
694 | #@property |
695 | #def state(self): | |
696 | # # Cache all states. | |
697 | # states = [j.state for j in self.jobs] | |
698 | # | |
699 | # target_state = "unknown" | |
700 | # | |
701 | # # If at least one job has failed, the whole build has failed. | |
702 | # if "failed" in states: | |
703 | # target_state = "failed" | |
704 | # | |
705 | # # It at least one of the jobs is still running, the whole | |
706 | # # build is in running state. | |
707 | # elif "running" in states: | |
708 | # target_state = "running" | |
709 | # | |
710 | # # If all jobs are in the finished state, we turn into finished | |
711 | # # state as well. | |
712 | # elif all([s == "finished" for s in states]): | |
713 | # target_state = "finished" | |
714 | # | |
715 | # return target_state | |
716 | ||
717 | def auto_update_state(self): | |
718 | """ | |
719 | Check if the state of this build can be updated and perform | |
720 | the change if possible. | |
721 | """ | |
722 | # Do not change the broken/obsolete state automatically. | |
723 | if self.state in ("broken", "obsolete"): | |
724 | return | |
725 | ||
726 | if self.repo and self.repo.type == "stable": | |
727 | self.update_state("stable") | |
728 | return | |
729 | ||
730 | # If any of the build jobs are finished, the build will be put in testing | |
731 | # state. | |
732 | for job in self.jobs: | |
733 | if job.state == "finished": | |
734 | self.update_state("testing") | |
735 | break | |
736 | ||
	def update_state(self, state, user=None, remove=False):
		"""
		Set the state of this build and apply the side effects:

		  * "broken" aborts all unfinished jobs and forces the removal
		    of the build from its repository.
		  * remove=True takes the build out of its repository.
		  * a release build entering "testing" is added to the first
		    repository of its distribution (if it is in none yet).
		"""
		assert state in ("stable", "testing", "obsolete", "broken")

		self.db.execute("UPDATE builds SET state = %s WHERE id = %s", state, self.id)

		# Keep the cached row in sync.
		if self._data:
			self._data["state"] = state

		# In broken state, the removal from the repository is forced and
		# all jobs that are not finished yet will be aborted.
		if state == "broken":
			remove = True

			for job in self.jobs:
				if job.state in ("new", "pending", "running", "dependency_error"):
					job.state = "aborted"

		# If this build is in a repository, it will leave it.
		if remove and self.repo:
			self.repo.rem_build(self)

		# If a release build is now in testing state, we put it into the
		# first repository of the distribution.
		elif self.type == "release" and state == "testing":
			# If the build is not in a repository, yet and if there is
			# a first repository, we put the build there.
			if not self.repo and self.distro.first_repo:
				self.distro.first_repo.add_build(self, user=user)

	@property
	def state(self):
		# The current state as stored in the database row.
		return self.data.state
769 | ||
9fa1787c MT |
	def is_broken(self):
		# Convenience check for the "broken" state.
		return self.state == "broken"

	def obsolete_others(self):
		"""
		Mark all other release builds of the same package as obsolete.

		Called from create() so that only the newest release build of a
		package stays active.
		"""
		if not self.type == "release":
			return

		for build in self.pakfire.builds.get_by_name(self.pkg.name, type="release"):
			# Don't modify ourself.
			if self.id == build.id:
				continue

			# Don't touch broken builds.
			if build.state in ("obsolete", "broken"):
				continue

			# Obsolete the build.
			build.update_state("obsolete")
788 | ||
789 | def set_severity(self, severity): | |
790 | self.db.execute("UPDATE builds SET severity = %s WHERE id = %s", state, self.id) | |
791 | ||
792 | if self._data: | |
793 | self._data["severity"] = severity | |
f6e6ff79 MT |
794 | |
795 | def get_severity(self): | |
796 | return self.data.severity | |
797 | ||
798 | severity = property(get_severity, set_severity) | |
799 | ||
	@property
	def commit(self):
		# The source commit of the package (None if there is none).
		if self.pkg and self.pkg.commit:
			return self.pkg.commit

	def update_message(self, msg):
		# Store a new message for this build and refresh the cached row.
		self.db.execute("UPDATE builds SET message = %s WHERE id = %s", msg, self.id)

		if self._data:
			self._data["message"] = msg
f6e6ff79 MT |
810 | |
	def has_perm(self, user):
		"""
		Check, if the given user has the right to perform administrative
		operations on this build.

		Admins always may; owners of scratch builds and maintainers of
		release builds may manage their own builds.  Critical-path
		packages additionally require the "manage_critical_path"
		permission.
		"""
		if user is None:
			return False

		if user.is_admin():
			return True

		# Check if the user is allowed to manage packages from the critical path.
		if self.critical_path and not user.has_perm("manage_critical_path"):
			return False

		# Search for maintainers...

		# Scratch builds.
		if self.type == "scratch":
			# The owner of a scratch build has the right to do anything with it.
			if self.owner_id == user.id:
				return True

		# Release builds.
		elif self.type == "release":
			# The maintainer also is allowed to manage the build.
			if self.pkg.maintainer == user:
				return True

		# Deny permission for all other cases.
		return False
842 | ||
	@property
	def message(self):
		"""
		The message text of this build.

		Falls back to the associated commit (subject plus body) when no
		message is stored for the build itself.
		"""
		message = ""

		if self.data.message:
			message = self.data.message

		elif self.commit:
			if self.commit.message:
				message = "\n".join((self.commit.subject, self.commit.message))
			else:
				message = self.commit.subject

		# Strip a redundant leading "<pkgname>: " prefix.
		prefix = "%s: " % self.pkg.name
		if message.startswith(prefix):
			message = message[len(prefix):]

		return message
861 | ||
	def get_priority(self):
		return self.data.priority

	def set_priority(self, priority):
		# Only values from -2 to 2 are allowed.
		assert priority in (-2, -1, 0, 1, 2)

		self.db.execute("UPDATE builds SET priority = %s WHERE id = %s", priority,
			self.id)

		# Keep the cached row in sync.
		if self._data:
			self._data["priority"] = priority

	priority = property(get_priority, set_priority)
875 | ||
	@property
	def path(self):
		"""
		The directory where the files of this build are stored.
		"""
		path = []
		if self.type == "scratch":
			# Scratch builds live in a directory named after their UUID.
			path.append(BUILD_SCRATCH_DIR)
			path.append(self.uuid)

		elif self.type == "release":
			# Release builds are stored by package name and full version.
			path.append(BUILD_RELEASE_DIR)
			path.append("%s/%s-%s-%s" % \
				(self.pkg.name, self.pkg.epoch, self.pkg.version, self.pkg.release))

		else:
			raise Exception, "Unknown build type: %s" % self.type

		return os.path.join(*path)
892 | ||
	@property
	def source_filename(self):
		# Filename of the source package.
		return os.path.basename(self.pkg.path)

	@property
	def download_prefix(self):
		# Base URL under which packages can be downloaded.
		return "/".join((self.pakfire.settings.get("download_baseurl"), "packages"))

	@property
	def source_download(self):
		# Full download URL of the source package.
		return "/".join((self.download_prefix, self.pkg.path))

	@property
	def source_hash_sha512(self):
		# SHA-512 checksum of the source package.
		return self.pkg.hash_sha512

	@property
	def link(self):
		# HTML link to this build, plus one link per job.
		# XXX maybe this should rather live in a uimodule.
		# zlib-1.2.3-2.ip3 [src, i686, blah...]
		s = """<a class="state_%s %s" href="/build/%s">%s</a>""" % \
			(self.state, self.type, self.uuid, self.name)

		s_jobs = []
		for job in self.jobs:
			s_jobs.append("""<a class="state_%s %s" href="/job/%s">%s</a>""" % \
				(job.state, job.type, job.uuid, job.arch.name))

		if s_jobs:
			s += " [%s]" % ", ".join(s_jobs)

		return s

	@property
	def supported_arches(self):
		# Architectures of the source package.
		return self.pkg.supported_arches

	@property
	def critical_path(self):
		# Whether the source package is flagged as critical path.
		return self.pkg.critical_path
933 | ||
	def get_jobs(self, type=None):
		"""
		Returns a list of jobs of this build.
		"""
		return self.pakfire.jobs.get_by_build(self.id, self, type=type)

	@property
	def jobs(self):
		"""
		Get a list of all build jobs that are in this build.
		"""
		# Fetched lazily and cached.
		if self._jobs is None:
			self._jobs = self.get_jobs(type="build")

		return self._jobs

	@property
	def test_jobs(self):
		# All test jobs of this build (lazily cached).
		if self._jobs_test is None:
			self._jobs_test = self.get_jobs(type="test")

		return self._jobs_test
956 | ||
957 | @property | |
958 | def all_jobs_finished(self): | |
959 | ret = True | |
960 | ||
961 | for job in self.jobs: | |
962 | if not job.state == "finished": | |
963 | ret = False | |
964 | break | |
965 | ||
966 | return ret | |
967 | ||
	def create_autojobs(self, arches=None, type="build"):
		"""
		Automatically create jobs for this build and return them.

		arches: architectures to create jobs for; defaults to all arches
		        the source package supports.  "src" is always skipped.
		type:   type of the jobs to create.
		"""
		jobs = []

		# Arches may be passed to this function. If not we use all arches
		# this package supports.
		if arches is None:
			arches = self.supported_arches

		# Create a new job for every given architecture.
		for arch in self.pakfire.arches.expand(arches):
			# Don't create jobs for src.
			if arch.name == "src":
				continue

			job = self.add_job(arch, type=type)
			jobs.append(job)

		# Return all newly created jobs.
		return jobs

	def add_job(self, arch, type="build"):
		# Create a single job for the given architecture.
		job = Job.create(self.pakfire, self, arch, type=type)

		# Add new job to cache.
		if self._jobs:
			self._jobs.append(job)

		return job
996 | ||
997 | ## Update stuff | |
998 | ||
999 | @property | |
1000 | def update_id(self): | |
1001 | if not self.type == "release": | |
1002 | return | |
1003 | ||
1004 | # Generate an update ID if none does exist, yet. | |
1005 | self.generate_update_id() | |
1006 | ||
1007 | s = [ | |
1008 | "%s" % self.distro.name.replace(" ", "").upper(), | |
1009 | "%04d" % (self.data.update_year or 0), | |
1010 | "%04d" % (self.data.update_num or 0), | |
1011 | ] | |
1012 | ||
1013 | return "-".join(s) | |
1014 | ||
	def generate_update_id(self):
		"""
		Allocate a per-year update number for this build if it does
		not have one yet. Only release builds get an update ID.
		"""
		if not self.type == "release":
			return

		# Nothing to do if a number has already been allocated.
		if self.data.update_num:
			return

		# Find the highest update number handed out in the current year.
		# NOTE(review): the SELECT and the UPDATE below are not atomic, so two
		# concurrent calls could allocate the same number — confirm the db
		# layer serializes these calls.
		update = self.db.get("SELECT update_num AS num FROM builds \
			WHERE update_year = YEAR(NOW()) ORDER BY update_num DESC LIMIT 1")

		if update:
			update_num = update.num + 1
		else:
			update_num = 1

		self.db.execute("UPDATE builds SET update_year = YEAR(NOW()), update_num = %s \
			WHERE id = %s", update_num, self.id)
1032 | ||
1033 | ## Comment stuff | |
1034 | ||
	def get_comments(self, limit=10, offset=0):
		"""
		Return all comments of this build (joined with their author)
		in chronological order, wrapped as CommentLogEntry objects.
		"""
		# NOTE(review): the limit and offset parameters are accepted but never
		# applied to the query — callers always receive the full list. Confirm
		# whether a LIMIT/OFFSET clause was intended here.
		query = "SELECT * FROM builds_comments \
			JOIN users ON builds_comments.user_id = users.id \
			WHERE build_id = %s ORDER BY time_created ASC"

		comments = []
		for comment in self.db.query(query, self.id):
			comment = logs.CommentLogEntry(self.pakfire, comment)
			comments.append(comment)

		return comments
1046 | ||
1047 | def add_comment(self, user, text, credit): | |
1048 | # Add the new comment to the database. | |
1049 | id = self.db.execute("INSERT INTO \ | |
1050 | builds_comments(build_id, user_id, text, credit, time_created) \ | |
1051 | VALUES(%s, %s, %s, %s, NOW())", | |
1052 | self.id, user.id, text, credit) | |
1053 | ||
1054 | # Update the credit cache. | |
1055 | if not self._credits is None: | |
1056 | self._credits += credit | |
1057 | ||
1058 | # Send the new comment to all watchers and stuff. | |
1059 | self.send_comment_message(id) | |
1060 | ||
1061 | # Return the ID of the newly created comment. | |
1062 | return id | |
1063 | ||
1064 | @property | |
1065 | def score(self): | |
1066 | # XXX UPDATE THIS | |
1067 | if self._credits is None: | |
1068 | # Get the sum of the credits from the database. | |
1069 | query = self.db.get( | |
1070 | "SELECT SUM(credit) as credits FROM builds_comments WHERE build_id = %s", | |
1071 | self.id | |
1072 | ) | |
1073 | ||
1074 | self._credits = query.credits or 0 | |
1075 | ||
1076 | return self._credits | |
1077 | ||
1078 | @property | |
1079 | def credits(self): | |
1080 | # XXX COMPAT | |
1081 | return self.score | |
1082 | ||
1083 | def get_commenters(self): | |
1084 | users = self.db.query("SELECT DISTINCT users.id AS id FROM builds_comments \ | |
1085 | JOIN users ON builds_comments.user_id = users.id \ | |
1086 | WHERE builds_comments.build_id = %s AND NOT users.deleted = 'Y' \ | |
1087 | AND NOT users.activated = 'Y' ORDER BY users.id", self.id) | |
1088 | ||
1089 | return [users.User(self.pakfire, u.id) for u in users] | |
1090 | ||
	def send_comment_message(self, comment_id):
		"""
		Send a notification about the given comment to everybody
		watching this build.
		"""
		comment = self.db.get("SELECT * FROM builds_comments WHERE id = %s",
			comment_id)

		# The comment must exist and belong to this very build.
		assert comment
		assert comment.build_id == self.id

		# Get user who wrote the comment.
		user = self.pakfire.users.get_by_id(comment.user_id)

		# Placeholders for the (translatable) message template below.
		format = {
			"build_name" : self.name,
			"user_name" : user.realname,
		}

		# XXX create beautiful message

		self.pakfire.messages.send_to_all(self.message_recipients,
			N_("%(user_name)s commented on %(build_name)s"),
			comment.text, format)
1111 | ||
1112 | ## Logging stuff | |
1113 | ||
1114 | def get_log(self, comments=True, repo=True, limit=None): | |
1115 | entries = [] | |
1116 | ||
fd681905 MT |
1117 | # Created entry. |
1118 | created_entry = logs.CreatedLogEntry(self.pakfire, self) | |
1119 | entries.append(created_entry) | |
1120 | ||
f6e6ff79 MT |
1121 | if comments: |
1122 | entries += self.get_comments(limit=limit) | |
1123 | ||
1124 | if repo: | |
1125 | entries += self.get_repo_moves(limit=limit) | |
1126 | ||
1127 | # Sort all entries in chronological order. | |
1128 | entries.sort() | |
1129 | ||
1130 | if limit: | |
1131 | entries = entries[:limit] | |
1132 | ||
1133 | return entries | |
1134 | ||
1135 | ## Watchers stuff | |
1136 | ||
1137 | def get_watchers(self): | |
1138 | query = self.db.query("SELECT DISTINCT user_id AS id FROM builds_watchers \ | |
1139 | JOIN users ON builds_watchers.user_id = users.id \ | |
1140 | WHERE builds_watchers.build_id = %s AND NOT users.deleted = 'Y' \ | |
1141 | AND users.activated = 'Y' ORDER BY users.id", self.id) | |
1142 | ||
1143 | return [users.User(self.pakfire, u.id) for u in query] | |
1144 | ||
1145 | def add_watcher(self, user): | |
1146 | # Don't add a user twice. | |
1147 | if user in self.get_watchers(): | |
1148 | return | |
1149 | ||
1150 | self.db.execute("INSERT INTO builds_watchers(build_id, user_id) \ | |
1151 | VALUES(%s, %s)", self.id, user.id) | |
1152 | ||
1153 | @property | |
1154 | def message_recipients(self): | |
1155 | ret = [] | |
1156 | ||
1157 | for watcher in self.get_watchers(): | |
1158 | ret.append("%s <%s>" % (watcher.realname, watcher.email)) | |
1159 | ||
1160 | return ret | |
1161 | ||
1162 | @property | |
1163 | def update(self): | |
1164 | if self._update is None: | |
1165 | update = self.db.get("SELECT update_id AS id FROM updates_builds \ | |
1166 | WHERE build_id = %s", self.id) | |
1167 | ||
1168 | if update: | |
1169 | self._update = updates.Update(self.pakfire, update.id) | |
1170 | ||
1171 | return self._update | |
1172 | ||
1173 | @property | |
1174 | def repo(self): | |
1175 | if self._repo is None: | |
1176 | repo = self.db.get("SELECT repo_id AS id FROM repositories_builds \ | |
1177 | WHERE build_id = %s", self.id) | |
1178 | ||
1179 | if repo: | |
1180 | self._repo = repository.Repository(self.pakfire, repo.id) | |
1181 | ||
1182 | return self._repo | |
1183 | ||
	def get_repo_moves(self, limit=None):
		"""
		Return the repository move history of this build in
		chronological order, wrapped as RepositoryLogEntry objects.
		"""
		# NOTE(review): the limit parameter is accepted but never applied to
		# the query — confirm whether a LIMIT clause was intended here.
		query = "SELECT * FROM repositories_history \
			WHERE build_id = %s ORDER BY time ASC"

		actions = []
		for action in self.db.query(query, self.id):
			action = logs.RepositoryLogEntry(self.pakfire, action)
			actions.append(action)

		return actions
1194 | ||
1195 | @property | |
1196 | def is_loose(self): | |
1197 | if self.repo: | |
1198 | return False | |
1199 | ||
1200 | return True | |
1201 | ||
1202 | @property | |
1203 | def repo_time(self): | |
1204 | repo = self.db.get("SELECT time_added FROM repositories_builds \ | |
1205 | WHERE build_id = %s", self.id) | |
1206 | ||
1207 | if repo: | |
1208 | return repo.time_added | |
1209 | ||
1210 | def get_auto_move(self): | |
1211 | return self.data.auto_move == "Y" | |
1212 | ||
1213 | def set_auto_move(self, state): | |
1214 | if state: | |
1215 | state = "Y" | |
1216 | else: | |
1217 | state = "N" | |
1218 | ||
1219 | self.db.execute("UPDATE builds SET auto_move = %s WHERE id = %s", self.id) | |
1220 | if self._data: | |
1221 | self._data["auto_move"] = state | |
1222 | ||
1223 | auto_move = property(get_auto_move, set_auto_move) | |
1224 | ||
	@property
	def can_move_forward(self):
		"""
		Whether this build is eligible to be moved into the next
		repository in line.
		"""
		# Loose builds (not in any repository) cannot be moved anywhere.
		if not self.repo:
			return False

		# If there is no next repository, we cannot move anything.
		next_repo = self.repo.next()

		if not next_repo:
			return False

		# If the needed amount of score is reached, we can move forward.
		if self.score >= next_repo.score_needed:
			return True

		# If the repository does not require a minimal time,
		# we can move forward immediately.
		if not self.repo.time_min:
			return True

		# Otherwise the build must have spent at least time_min in its
		# current repository.
		query = self.db.get("SELECT NOW() - time_added AS duration FROM repositories_builds \
			WHERE build_id = %s", self.id)
		duration = query.duration

		if duration >= self.repo.time_min:
			return True

		return False
1253 | ||
1254 | ## Bugs | |
1255 | ||
1256 | def get_bug_ids(self): | |
1257 | query = self.db.query("SELECT bug_id FROM builds_bugs \ | |
1258 | WHERE build_id = %s", self.id) | |
1259 | ||
1260 | return [b.bug_id for b in query] | |
1261 | ||
1262 | def add_bug(self, bug_id, user=None, log=True): | |
1263 | # Check if this bug is already in the list of bugs. | |
1264 | if bug_id in self.get_bug_ids(): | |
1265 | return | |
1266 | ||
1267 | self.db.execute("INSERT INTO builds_bugs(build_id, bug_id) \ | |
1268 | VALUES(%s, %s)", self.id, bug_id) | |
1269 | ||
1270 | # Log the event. | |
1271 | if log: | |
1272 | self.log("bug_added", user=user, bug_id=bug_id) | |
1273 | ||
1274 | def rem_bug(self, bug_id, user=None, log=True): | |
1275 | self.db.execute("DELETE FROM builds_bugs WHERE build_id = %s AND \ | |
1276 | bug_id = %s", self.id, bug_id) | |
1277 | ||
1278 | # Log the event. | |
1279 | if log: | |
1280 | self.log("bug_removed", user=user, bug_id=bug_id) | |
1281 | ||
1282 | def search_for_bugs(self): | |
1283 | if not self.commit: | |
1284 | return | |
1285 | ||
1286 | pattern = re.compile(r"(bug\s?|#)(\d+)") | |
1287 | ||
1288 | for txt in (self.commit.subject, self.commit.message): | |
1289 | for bug in re.finditer(pattern, txt): | |
1290 | try: | |
1291 | bugid = int(bug.group(2)) | |
1292 | except ValueError: | |
1293 | continue | |
1294 | ||
1295 | # Check if a bug with the given ID exists in BZ. | |
1296 | bug = self.pakfire.bugzilla.get_bug(bugid) | |
1297 | if not bug: | |
1298 | continue | |
1299 | ||
1300 | self.add_bug(bugid) | |
1301 | ||
1302 | def get_bugs(self): | |
1303 | bugs = [] | |
1304 | for bug_id in self.get_bug_ids(): | |
1305 | bug = self.pakfire.bugzilla.get_bug(bug_id) | |
1306 | if not bug: | |
1307 | continue | |
1308 | ||
1309 | bugs.append(bug) | |
1310 | ||
1311 | return bugs | |
1312 | ||
	def _update_bugs_helper(self, repo):
		"""
		This function takes a new status and generates messages that
		are appended to all bugs.
		"""
		# Repository types without a configured message template are ignored.
		try:
			kwargs = BUG_MESSAGES[repo.type].copy()
		except KeyError:
			return

		baseurl = self.pakfire.settings.get("baseurl", "")
		# Values that are substituted into the template comment.
		args = {
			"build_url" : "%s/build/%s" % (baseurl, self.uuid),
			"distro_name" : self.distro.name,
			"package_name" : self.name,
			"repo_name" : repo.name,
		}
		kwargs["comment"] = kwargs["comment"] % args

		self.update_bugs(**kwargs)
1333 | ||
	def _update_bug(self, bug_id, status=None, resolution=None, comment=None):
		# Queue a single bug update; presumably a background task picks these
		# rows up and talks to the bugtracker — TODO confirm.
		self.db.execute("INSERT INTO builds_bugs_updates(bug_id, status, resolution, comment, time) \
			VALUES(%s, %s, %s, %s, NOW())", bug_id, status, resolution, comment)
1337 | ||
1338 | def update_bugs(self, status, resolution=None, comment=None): | |
1339 | # Update all bugs linked to this build. | |
1340 | for bug_id in self.get_bug_ids(): | |
1341 | self._update_bug(bug_id, status=status, resolution=resolution, comment=comment) | |
1342 | ||
1343 | ||
class Jobs(base.Object):
	"""
	Collection interface around the "jobs" table: look up, list and
	aggregate Job objects.
	"""

	def get_by_id(self, id, data=None):
		"""
		Return the Job with the given ID; data may hold a pre-fetched row.
		"""
		return Job(self.pakfire, id, data)

	def get_by_uuid(self, uuid):
		"""
		Look up a job by its UUID. Returns None when it does not exist.
		"""
		job = self.db.get("SELECT id FROM jobs WHERE uuid = %s", uuid)

		if job:
			return self.get_by_id(job.id)

	def get_by_build(self, build_id, build=None, type=None):
		"""
		Get all jobs in the specified build, optionally restricted to
		one job type. Returns a sorted list.
		"""
		query = "SELECT * FROM jobs WHERE build_id = %s"
		args = [build_id,]

		if type:
			query += " AND type = %s"
			args.append(type)

		jobs = []
		for job in self.db.query(query, *args):
			job = Job(self.pakfire, job.id, job)

			# If the Build object was set, we set it so it won't be retrieved
			# from the database again.
			if build:
				job._build = build

			jobs.append(job)

		# Return sorted list of jobs.
		return sorted(jobs)

	def get_active(self, host_id=None, builder=None, states=None):
		"""
		All jobs that are currently being processed, running jobs
		first. Optionally restricted to one builder.
		"""
		if builder:
			host_id = builder.id

		if states is None:
			states = ["dispatching", "running", "uploading"]

		query = "SELECT * FROM jobs WHERE state IN (%s)" % ", ".join(["%s"] * len(states))
		# Copy so the caller's list is never mutated below.
		args = list(states)

		if host_id:
			# BUGFIX: the builder ID used to be %-interpolated straight into
			# the SQL string; pass it as a bound query parameter instead.
			query += " AND builder_id = %s"
			args.append(host_id)

		query += " ORDER BY \
			CASE \
				WHEN jobs.state = 'running' THEN 0 \
				WHEN jobs.state = 'uploading' THEN 1 \
				WHEN jobs.state = 'dispatching' THEN 2 \
				WHEN jobs.state = 'pending' THEN 3 \
				WHEN jobs.state = 'new' THEN 4 \
			END, time_started ASC"

		return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]

	def get_next_iter(self, *args, **kwargs):
		"""
		Iterator version of get_next().
		"""
		return iter(self.get_next(*args, **kwargs))

	def get_next(self, arches=None, builder=None, limit=None, offset=None, type=None,
			state=None, states=None, max_tries=None):
		"""
		Fetch jobs from the queue, reversed so the least recently
		queued come last.

		NOTE(review): the offset parameter is accepted but never
		applied — confirm whether it was intended.
		"""
		if state and states is None:
			states = [state,]

		# All filters below are appended with " AND ..." and therefore extend
		# the INNER JOIN's ON condition, which behaves exactly like a WHERE
		# clause for an inner join.
		query = "SELECT * FROM jobs \
			INNER JOIN jobs_queue ON jobs.id = jobs_queue.id"
		args = []

		if arches:
			query += " AND jobs_queue.arch IN (%s)" % ", ".join(["%s"] * len(arches))
			args.extend(arches)

		if builder:
			query += " AND jobs_queue.designated_builder_id = %s"
			args.append(builder.id)

		if max_tries:
			query += " AND jobs.max_tries <= %s"
			args.append(max_tries)

		if states:
			query += " AND jobs.state IN (%s)" % ", ".join(["%s"] * len(states))
			args.extend(states)

		if type:
			query += " AND jobs.type = %s"
			args.append(type)

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		jobs = []
		for row in self.db.query(query, *args):
			job = self.pakfire.jobs.get_by_id(row.id, row)
			jobs.append(job)

		# Reverse the order of the builds.
		jobs.reverse()

		return jobs

	def get_latest(self, arch=None, builder=None, limit=None, age=None, date=None):
		"""
		Recently completed jobs (finished, failed or aborted), newest
		first. Optionally filtered by arch, builder, age or day.
		"""
		query = "SELECT * FROM jobs"
		args = []

		# Only jobs that have reached a final state.
		where = ["(state = 'finished' OR state = 'failed' OR state = 'aborted')"]

		if arch:
			where.append("arch_id = %s")
			args.append(arch.id)

		if builder:
			where.append("builder_id = %s")
			args.append(builder.id)

		if date:
			# Filter by a single day given as "YYYY-MM-DD"; invalid input is
			# silently ignored.
			try:
				year, month, day = date.split("-", 2)
				date = datetime.date(int(year), int(month), int(day))
			except ValueError:
				pass
			else:
				where.append("(DATE(time_created) = %s OR \
					DATE(time_started) = %s OR DATE(time_finished) = %s)")
				args += (date, date, date)

		if age:
			where.append("time_finished >= DATE_SUB(NOW(), INTERVAL %s)" % age)

		if where:
			query += " WHERE %s" % " AND ".join(where)

		query += " ORDER BY time_finished DESC"

		if limit:
			query += " LIMIT %s"
			args.append(limit)

		return [Job(self.pakfire, j.id, j) for j in self.db.query(query, *args)]

	def get_average_build_time(self):
		"""
		Returns the average build time of all finished builds from the
		last 3 months.
		"""
		result = self.db.get("SELECT AVG(time_finished - time_started) as average \
			FROM jobs WHERE type = 'build' AND state = 'finished' AND \
			time_finished >= DATE_SUB(NOW(), INTERVAL 3 MONTH)")

		if result:
			return result.average

	def count(self, *states):
		"""
		Count all jobs, optionally restricted to the given states.
		"""
		query = "SELECT COUNT(*) AS count FROM jobs"
		args = []

		if states:
			# NOTE(review): relies on the db layer expanding a tuple for
			# "IN %s" — confirm this works with the active database driver.
			query += " WHERE state IN %s"
			args.append(states)

		jobs = self.db.get(query, *args)
		if jobs:
			return jobs.count

	def get_queue_length(self, state=None):
		"""
		Number of jobs currently in the queue, optionally restricted
		to a single state.
		"""
		if state:
			res = self.db.get("SELECT COUNT(*) AS count FROM jobs_queue \
				LEFT JOIN jobs ON jobs_queue.id = jobs.id WHERE state = %s", state)
		else:
			res = self.db.get("SELECT COUNT(*) AS count FROM jobs_queue")

		if res:
			return res.count

		return 0

	def get_avg_wait_time(self):
		"""
		Average time jobs spend waiting in the queue; 0 when no data
		is available.
		"""
		res = self.db.get("SELECT AVG(time_waiting) AS time_waiting FROM jobs_waiting")

		if res and res.time_waiting:
			try:
				return int(res.time_waiting)
			except ValueError:
				return 0

		return 0

	def get_state_stats(self):
		"""
		Map every job state to the number of jobs currently in it.
		"""
		res = self.db.query("SELECT state, COUNT(*) AS count FROM jobs GROUP BY state")

		if not res:
			return {}

		# Start with all known states so missing ones show up as zero.
		ret = {
			"new" : 0,
			"pending" : 0,
			"running" : 0,
			"finished" : 0,
			"dispatching" : 0,
			"uploading" : 0,
			"failed" : 0,
			"aborted" : 0,
			"temporary_failed" : 0,
			"dependency_error" : 0,
			"download_error" : 0,
			"deleted" : 0,
		}
		for row in res:
			ret[row.state] = int(row.count)

		return ret

	def get_build_durations(self):
		"""
		Min/max/average/stddev build duration per platform, plus an
		"all" aggregate row.
		"""
		res = self.db.query("SELECT platform, MIN(duration) AS minimum, \
			MAX(duration) AS maximum, AVG(duration) AS average, \
			STDDEV_POP(duration) AS stddev \
			FROM builds_times GROUP BY platform \
			UNION SELECT 'all', MIN(duration) AS minimum, \
			MAX(duration) AS maximum, AVG(duration) AS average, \
			STDDEV_POP(duration) AS stddev \
			FROM builds_times")

		ret = {}
		for row in res:
			ret[row.platform] = {
				"minimum" : int(row.minimum),
				"maximum" : int(row.maximum),
				"average" : int(row.average),
				"stddev" : int(row.stddev),
			}

		return ret
1582 | ||
f6e6ff79 MT |
1583 | |
1584 | class Job(base.Object): | |
	def __init__(self, pakfire, id, data=None):
		"""
		Wrap the job with the given database ID; data may hold a
		pre-fetched row to avoid a later lookup.
		"""
		base.Object.__init__(self, pakfire)

		# The ID of this Job object.
		self.id = id

		# Cache the data of this object.
		self._data = data
		# Lazily resolved caches for related objects.
		self._build = None
		self._builder = None
		self._packages = None
		self._logfiles = None
1597 | ||
1598 | def __str__(self): | |
1599 | return "<%s id=%s %s>" % (self.__class__.__name__, self.id, self.name) | |
1600 | ||
	def __cmp__(self, other):
		# Python 2 comparison protocol; defines the sort order of jobs.
		# Build jobs always sort before test jobs.
		if self.type == "build" and other.type == "test":
			return -1
		elif self.type == "test" and other.type == "build":
			return 1

		# Within the same build, order by architecture.
		if self.build_id == other.build_id:
			return cmp(self.arch, other.arch)

		# Across builds, order by package and fall back to creation time.
		ret = cmp(self.pkg, other.pkg)

		if not ret:
			ret = cmp(self.time_created, other.time_created)

		return ret
1616 | ||
1617 | @property | |
1618 | def distro(self): | |
1619 | assert self.build.distro | |
1620 | return self.build.distro | |
1621 | ||
f6e6ff79 MT |
	@classmethod
	def create(cls, pakfire, build, arch, type="build"):
		"""
		Insert a new job row for the given build and architecture and
		return the corresponding Job object.
		"""
		id = pakfire.db.execute("INSERT INTO jobs(uuid, type, build_id, arch_id, time_created) \
			VALUES(%s, %s, %s, %s, NOW())", "%s" % uuid.uuid4(), type, build.id, arch.id)

		job = Job(pakfire, id)
		job.log("created")

		# Set cache for Build object.
		job._build = build

		# Jobs are by default in state "new" and wait for being checked
		# for dependencies. Packages that do have no build dependencies
		# can directly be forwarded to "pending" state.
		if not job.pkg.requires:
			job.state = "pending"

		return job
1640 | ||
1641 | def delete(self): | |
1642 | self.__delete_buildroots() | |
1643 | self.__delete_history() | |
1644 | self.__delete_packages() | |
1645 | self.__delete_logfiles() | |
1646 | ||
1647 | # Delete the job itself. | |
1648 | self.db.execute("DELETE FROM jobs WHERE id = %s", self.id) | |
f6e6ff79 MT |
1649 | |
1650 | def __delete_buildroots(self): | |
1651 | """ | |
1652 | Removes all buildroots. | |
1653 | """ | |
1654 | self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s", self.id) | |
1655 | ||
1656 | def __delete_history(self): | |
1657 | """ | |
1658 | Removes all references in the history to this build job. | |
1659 | """ | |
1660 | self.db.execute("DELETE FROM jobs_history WHERE job_id = %s", self.id) | |
1661 | ||
1662 | def __delete_packages(self): | |
1663 | """ | |
1664 | Deletes all uploaded files from the job. | |
1665 | """ | |
1666 | for pkg in self.packages: | |
1667 | pkg.delete() | |
1668 | ||
1669 | self.db.execute("DELETE FROM jobs_packages WHERE job_id = %s", self.id) | |
1670 | ||
1671 | def __delete_logfiles(self): | |
1672 | for logfile in self.logfiles: | |
1673 | self.db.execute("INSERT INTO queue_delete(path) VALUES(%s)", logfile.path) | |
1674 | ||
1675 | def reset(self, user=None): | |
1676 | self.__delete_buildroots() | |
1677 | self.__delete_packages() | |
1678 | self.__delete_history() | |
1679 | self.__delete_logfiles() | |
1680 | ||
1681 | self.state = "new" | |
1682 | self.log("reset", user=user) | |
1683 | ||
1684 | @property | |
1685 | def data(self): | |
1686 | if self._data is None: | |
163d9d8b | 1687 | self._data = self.db.get("SELECT * FROM jobs WHERE id = %s", self.id) |
f6e6ff79 MT |
1688 | assert self._data |
1689 | ||
1690 | return self._data | |
1691 | ||
1692 | ## Logging stuff | |
1693 | ||
1694 | def log(self, action, user=None, state=None, builder=None, test_job=None): | |
1695 | user_id = None | |
1696 | if user: | |
1697 | user_id = user.id | |
1698 | ||
1699 | builder_id = None | |
1700 | if builder: | |
1701 | builder_id = builder.id | |
1702 | ||
1703 | test_job_id = None | |
1704 | if test_job: | |
1705 | test_job_id = test_job.id | |
1706 | ||
1707 | self.db.execute("INSERT INTO jobs_history(job_id, action, state, user_id, \ | |
1708 | time, builder_id, test_job_id) VALUES(%s, %s, %s, %s, NOW(), %s, %s)", | |
1709 | self.id, action, state, user_id, builder_id, test_job_id) | |
1710 | ||
1711 | def get_log(self, limit=None, offset=None, user=None): | |
1712 | query = "SELECT * FROM jobs_history" | |
1713 | ||
1714 | conditions = ["job_id = %s",] | |
1715 | args = [self.id,] | |
1716 | ||
1717 | if user: | |
1718 | conditions.append("user_id = %s") | |
1719 | args.append(user.id) | |
1720 | ||
1721 | if conditions: | |
1722 | query += " WHERE %s" % " AND ".join(conditions) | |
1723 | ||
1724 | query += " ORDER BY time DESC" | |
1725 | ||
1726 | if limit: | |
1727 | if offset: | |
1728 | query += " LIMIT %s,%s" | |
1729 | args += [offset, limit,] | |
1730 | else: | |
1731 | query += " LIMIT %s" | |
1732 | args += [limit,] | |
1733 | ||
1734 | entries = [] | |
1735 | for entry in self.db.query(query, *args): | |
1736 | entry = logs.JobLogEntry(self.pakfire, entry) | |
1737 | entries.append(entry) | |
1738 | ||
1739 | return entries | |
1740 | ||
	@property
	def uuid(self):
		# Globally unique identifier, assigned in create().
		return self.data.uuid

	@property
	def type(self):
		# Job type; "build" or "test" in this module.
		return self.data.type

	@property
	def build_id(self):
		# ID of the build this job belongs to.
		return self.data.build_id
1752 | ||
1753 | @property | |
1754 | def build(self): | |
1755 | if self._build is None: | |
1756 | self._build = self.pakfire.builds.get_by_id(self.build_id) | |
1757 | assert self._build | |
1758 | ||
1759 | return self._build | |
1760 | ||
1761 | @property | |
1762 | def related_jobs(self): | |
1763 | ret = [] | |
1764 | ||
1765 | for job in self.build.jobs: | |
1766 | if job == self: | |
1767 | continue | |
1768 | ||
1769 | ret.append(job) | |
1770 | ||
1771 | return ret | |
1772 | ||
	@property
	def pkg(self):
		# The package of the build this job belongs to.
		return self.build.pkg

	@property
	def name(self):
		# Human-readable job name: "<pkg>-<version>.<arch>".
		return "%s-%s.%s" % (self.pkg.name, self.pkg.friendly_version, self.arch.name)
1780 | ||
eedc6432 MT |
1781 | @property |
1782 | def size(self): | |
1783 | return sum((p.size for p in self.packages)) | |
1784 | ||
a90bd9b0 MT |
1785 | def is_running(self): |
1786 | """ | |
1787 | Returns True if job is in a running state. | |
1788 | """ | |
1789 | return self.state in ("pending", "dispatching", "running", "uploading") | |
1790 | ||
f6e6ff79 MT |
	def get_state(self):
		# Current state as stored in the database row.
		return self.data.state

	def set_state(self, state, user=None, log=True):
		"""
		Move the job into a new state and perform the side effects
		that belong to the transition (timestamps, notifications,
		build state propagation).
		"""
		# Nothing to do if the state remains.
		if not self.state == state:
			self.db.execute("UPDATE jobs SET state = %s WHERE id = %s", state, self.id)

		# Log the event.
		if log and not state == "new":
			self.log("state_change", state=state, user=user)

		# Update cache.
		if self._data:
			self._data["state"] = state

		# Always clear the message when the status is changed.
		self.update_message(None)

		# Update some more informations.
		if state == "dispatching":
			# Set start time.
			self.db.execute("UPDATE jobs SET time_started = NOW(), time_finished = NULL \
				WHERE id = %s", self.id)

		elif state == "pending":
			# Count another dispatch attempt and clear the timestamps.
			self.db.execute("UPDATE jobs SET tries = tries + 1, time_started = NULL, \
				time_finished = NULL WHERE id = %s", self.id)

		elif state in ("aborted", "dependency_error", "finished", "failed"):
			# Set finish time and reset builder..
			self.db.execute("UPDATE jobs SET time_finished = NOW() WHERE id = %s", self.id)

			# Send messages to the user.
			if state == "finished":
				self.send_finished_message()

			elif state == "failed":
				# Remove all package files if a job is set to failed state.
				self.__delete_packages()

				self.send_failed_message()

		# Automatically update the state of the build (not on test builds).
		if self.type == "build":
			self.build.auto_update_state()

	state = property(get_state, set_state)
1839 | ||
	@property
	def message(self):
		# Free-form status message stored alongside the job.
		return self.data.message

	def update_message(self, msg):
		# Persist the new message and keep the cached row in sync.
		self.db.execute("UPDATE jobs SET message = %s WHERE id = %s",
			msg, self.id)

		if self._data:
			self._data["message"] = msg
1850 | ||
	@property
	def builder_id(self):
		# ID of the builder host this job is/was assigned to (may be None).
		return self.data.builder_id

	def get_builder(self):
		# Return the Builder object (resolved lazily and cached), or None
		# when no builder has been assigned yet.
		if not self.builder_id:
			return

		if self._builder is None:
			self._builder = builders.Builder(self.pakfire, self.builder_id)
			assert self._builder

		return self._builder

	def set_builder(self, builder, user=None):
		# Assign the given builder to this job.
		self.db.execute("UPDATE jobs SET builder_id = %s WHERE id = %s",
			builder.id, self.id)

		# Update cache.
		if self._data:
			self._data["builder_id"] = builder.id

		self._builder = builder

		# Log the event (only when it was triggered by a user).
		if user:
			self.log("builder_assigned", builder=builder, user=user)

	builder = property(get_builder, set_builder)
1880 | ||
	@property
	def arch_id(self):
		# ID of the architecture this job builds for.
		return self.data.arch_id

	@property
	def arch(self):
		# Arch object, resolved once and then cached on the instance.
		if not hasattr(self, "_arch"):
			self._arch = self.pakfire.arches.get_by_id(self.arch_id)

		return self._arch
f6e6ff79 MT |
1891 | |
1892 | @property | |
1893 | def duration(self): | |
1894 | if not self.time_started: | |
1895 | return 0 | |
1896 | ||
1897 | if self.time_finished: | |
1898 | delta = self.time_finished - self.time_started | |
1899 | else: | |
1900 | delta = datetime.datetime.utcnow() - self.time_started | |
1901 | ||
1902 | return delta.total_seconds() | |
1903 | ||
	@property
	def time_created(self):
		# When the job row was inserted.
		return self.data.time_created

	@property
	def time_started(self):
		# When the job was last dispatched (None if it never started).
		return self.data.time_started

	@property
	def time_finished(self):
		# When the job reached a final state (None while still running).
		return self.data.time_finished
1915 | ||
a90bd9b0 MT |
	@property
	def expected_runtime(self):
		"""
		Returns the estimated time and stddev, this job takes to finish.
		"""
		# Get the average build time.
		build_times = self.pakfire.builds.get_build_times_by_arch(self.arch.name,
			name=self.pkg.name)

		# If there is no statistical data, we cannot estimate anything.
		if not build_times:
			return None, None

		return build_times.average, build_times.stddev
1930 | ||
1931 | @property | |
1932 | def eta(self): | |
1933 | expected_runtime, stddev = self.expected_runtime | |
1934 | ||
1935 | if expected_runtime: | |
1936 | return expected_runtime - int(self.duration), stddev | |
1937 | ||
f6e6ff79 MT |
	@property
	def tries(self):
		# How often this job was (re)dispatched; incremented whenever the job
		# enters the "pending" state (see set_state).
		return self.data.tries
1941 | ||
1942 | @property | |
1943 | def packages(self): | |
1944 | if self._packages is None: | |
1945 | self._packages = [] | |
1946 | ||
1947 | query = "SELECT pkg_id AS id FROM jobs_packages \ | |
1948 | JOIN packages ON packages.id = jobs_packages.pkg_id \ | |
1949 | WHERE jobs_packages.job_id = %s ORDER BY packages.name" | |
1950 | ||
1951 | for pkg in self.db.query(query, self.id): | |
1952 | pkg = packages.Package(self.pakfire, pkg.id) | |
1953 | pkg._job = self | |
1954 | ||
1955 | self._packages.append(pkg) | |
1956 | ||
1957 | return self._packages | |
1958 | ||
1959 | def get_pkg_by_uuid(self, uuid): | |
1960 | pkg = self.db.get("SELECT packages.id FROM packages \ | |
1961 | JOIN jobs_packages ON jobs_packages.pkg_id = packages.id \ | |
1962 | WHERE jobs_packages.job_id = %s AND packages.uuid = %s", | |
1963 | self.id, uuid) | |
1964 | ||
1965 | if not pkg: | |
1966 | return | |
1967 | ||
1968 | pkg = packages.Package(self.pakfire, pkg.id) | |
1969 | pkg._job = self | |
1970 | ||
1971 | return pkg | |
1972 | ||
1973 | @property | |
1974 | def logfiles(self): | |
1975 | if self._logfiles is None: | |
1976 | self._logfiles = [] | |
1977 | ||
1978 | for log in self.db.query("SELECT id FROM logfiles WHERE job_id = %s", self.id): | |
1979 | log = logs.LogFile(self.pakfire, log.id) | |
1980 | log._job = self | |
1981 | ||
1982 | self._logfiles.append(log) | |
1983 | ||
1984 | return self._logfiles | |
1985 | ||
1986 | def add_file(self, filename): | |
1987 | """ | |
1988 | Add the specified file to this job. | |
1989 | ||
1990 | The file is copied to the right directory by this function. | |
1991 | """ | |
1992 | assert os.path.exists(filename) | |
1993 | ||
1994 | if filename.endswith(".log"): | |
1995 | self._add_file_log(filename) | |
1996 | ||
1997 | elif filename.endswith(".%s" % PACKAGE_EXTENSION): | |
1998 | # It is not allowed to upload packages on test builds. | |
1999 | if self.type == "test": | |
2000 | return | |
2001 | ||
2002 | self._add_file_package(filename) | |
2003 | ||
2004 | def _add_file_log(self, filename): | |
2005 | """ | |
2006 | Attach a log file to this job. | |
2007 | """ | |
2008 | target_dirname = os.path.join(self.build.path, "logs") | |
2009 | ||
2010 | if self.type == "test": | |
2011 | i = 1 | |
2012 | while True: | |
2013 | target_filename = os.path.join(target_dirname, | |
2014 | "test.%s.%s.%s.log" % (self.arch.name, i, self.tries)) | |
2015 | ||
2016 | if os.path.exists(target_filename): | |
2017 | i += 1 | |
2018 | else: | |
2019 | break | |
2020 | else: | |
2021 | target_filename = os.path.join(target_dirname, | |
2022 | "build.%s.%s.log" % (self.arch.name, self.tries)) | |
2023 | ||
2024 | # Make sure the target directory exists. | |
2025 | if not os.path.exists(target_dirname): | |
2026 | os.makedirs(target_dirname) | |
2027 | ||
2028 | # Calculate a SHA512 hash from that file. | |
2029 | f = open(filename, "rb") | |
2030 | h = hashlib.sha512() | |
2031 | while True: | |
2032 | buf = f.read(BUFFER_SIZE) | |
2033 | if not buf: | |
2034 | break | |
2035 | ||
2036 | h.update(buf) | |
2037 | f.close() | |
2038 | ||
2039 | # Copy the file to the final location. | |
2040 | shutil.copy2(filename, target_filename) | |
2041 | ||
2042 | # Create an entry in the database. | |
2043 | self.db.execute("INSERT INTO logfiles(job_id, path, filesize, hash_sha512) \ | |
2044 | VALUES(%s, %s, %s, %s)", self.id, os.path.relpath(target_filename, PACKAGES_DIR), | |
2045 | os.path.getsize(target_filename), h.hexdigest()) | |
2046 | ||
2047 | def _add_file_package(self, filename): | |
2048 | # Open package (creates entry in the database). | |
2049 | pkg = packages.Package.open(self.pakfire, filename) | |
2050 | ||
2051 | # Move package to the build directory. | |
2052 | pkg.move(os.path.join(self.build.path, self.arch.name)) | |
2053 | ||
2054 | # Attach the package to this job. | |
2055 | self.db.execute("INSERT INTO jobs_packages(job_id, pkg_id) VALUES(%s, %s)", | |
2056 | self.id, pkg.id) | |
2057 | ||
	def get_aborted_state(self):
		"""Return the state this job had before it was aborted."""
		return self.data.aborted_state

	def set_aborted_state(self, state):
		"""Persist the state this job had before it was aborted."""
		self.db.execute("UPDATE jobs SET aborted_state = %s WHERE id = %s",
			state, self.id)

		# Keep the cached data row in sync with the database.
		if self._data:
			self._data["aborted_state"] = state

	# Expose getter/setter pair as a regular attribute.
	aborted_state = property(get_aborted_state, set_aborted_state)
2069 | ||
2070 | @property | |
2071 | def message_recipients(self): | |
2072 | l = [] | |
2073 | ||
2074 | # Add all people watching the build. | |
2075 | l += self.build.message_recipients | |
2076 | ||
2077 | # Add the package maintainer on release builds. | |
2078 | if self.build.type == "release": | |
2079 | maint = self.pkg.maintainer | |
2080 | ||
2081 | if isinstance(maint, users.User): | |
2082 | l.append("%s <%s>" % (maint.realname, maint.email)) | |
2083 | elif maint: | |
2084 | l.append(maint) | |
2085 | ||
2086 | # XXX add committer and commit author. | |
2087 | ||
2088 | # Add the owner of the scratch build on scratch builds. | |
2089 | elif self.build.type == "scratch" and self.build.user: | |
2090 | l.append("%s <%s>" % \ | |
2091 | (self.build.user.realname, self.build.user.email)) | |
2092 | ||
2093 | return set(l) | |
2094 | ||
2095 | def save_buildroot(self, pkgs): | |
2096 | rows = [] | |
2097 | ||
2098 | for pkg_name, pkg_uuid in pkgs: | |
2099 | rows.append((self.id, self.tries, pkg_uuid, pkg_name)) | |
2100 | ||
2101 | # Cleanup old stuff first (for rebuilding packages). | |
2102 | self.db.execute("DELETE FROM jobs_buildroots WHERE job_id = %s AND tries = %s", | |
2103 | self.id, self.tries) | |
2104 | ||
2105 | self.db.executemany("INSERT INTO \ | |
2106 | jobs_buildroots(job_id, tries, pkg_uuid, pkg_name) \ | |
2107 | VALUES(%s, %s, %s, %s)", rows) | |
2108 | ||
2109 | def has_buildroot(self, tries=None): | |
2110 | if tries is None: | |
2111 | tries = self.tries | |
2112 | ||
2113 | res = self.db.get("SELECT COUNT(*) AS num FROM jobs_buildroots \ | |
2114 | WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \ | |
2115 | ORDER BY pkg_name", self.id, tries) | |
2116 | ||
2117 | if res: | |
2118 | return res.num | |
2119 | ||
2120 | return 0 | |
2121 | ||
2122 | def get_buildroot(self, tries=None): | |
2123 | if tries is None: | |
2124 | tries = self.tries | |
2125 | ||
2126 | rows = self.db.query("SELECT * FROM jobs_buildroots \ | |
2127 | WHERE jobs_buildroots.job_id = %s AND jobs_buildroots.tries = %s \ | |
2128 | ORDER BY pkg_name", self.id, tries) | |
2129 | ||
2130 | pkgs = [] | |
2131 | for row in rows: | |
2132 | # Search for this package in the packages table. | |
2133 | pkg = self.pakfire.packages.get_by_uuid(row.pkg_uuid) | |
2134 | pkgs.append((row.pkg_name, row.pkg_uuid, pkg)) | |
2135 | ||
2136 | return pkgs | |
2137 | ||
2138 | def send_finished_message(self): | |
2139 | # Send no finished mails for test jobs. | |
2140 | if self.type == "test": | |
2141 | return | |
2142 | ||
2143 | logging.debug("Sending finished message for job %s to %s" % \ | |
2144 | (self.name, ", ".join(self.message_recipients))) | |
2145 | ||
2146 | info = { | |
2147 | "build_name" : self.name, | |
2148 | "build_host" : self.builder.name, | |
2149 | "build_uuid" : self.uuid, | |
2150 | } | |
2151 | ||
2152 | self.pakfire.messages.send_to_all(self.message_recipients, | |
2153 | MSG_BUILD_FINISHED_SUBJECT, MSG_BUILD_FINISHED, info) | |
2154 | ||
2155 | def send_failed_message(self): | |
2156 | logging.debug("Sending failed message for job %s to %s" % \ | |
2157 | (self.name, ", ".join(self.message_recipients))) | |
2158 | ||
2159 | build_host = "--" | |
2160 | if self.builder: | |
2161 | build_host = self.builder.name | |
2162 | ||
2163 | info = { | |
2164 | "build_name" : self.name, | |
2165 | "build_host" : build_host, | |
2166 | "build_uuid" : self.uuid, | |
2167 | } | |
2168 | ||
2169 | self.pakfire.messages.send_to_all(self.message_recipients, | |
2170 | MSG_BUILD_FAILED_SUBJECT, MSG_BUILD_FAILED, info) | |
2171 | ||
2172 | def set_start_time(self, start_time): | |
2173 | if start_time is None: | |
2174 | return | |
2175 | ||
2176 | self.db.execute("UPDATE jobs SET start_not_before = NOW() + %s \ | |
2177 | WHERE id = %s LIMIT 1", start_time, self.id) | |
2178 | ||
2179 | def schedule(self, type, start_time=None, user=None): | |
2180 | assert type in ("rebuild", "test") | |
2181 | ||
2182 | if type == "rebuild": | |
2183 | if self.state == "finished": | |
2184 | return | |
2185 | ||
2186 | self.set_state("new", user=user, log=False) | |
2187 | self.set_start_time(start_time) | |
2188 | ||
2189 | # Log the event. | |
2190 | self.log("schedule_rebuild", user=user) | |
2191 | ||
2192 | elif type == "test": | |
2193 | if not self.state == "finished": | |
2194 | return | |
2195 | ||
2196 | # Create a new job with same build and arch. | |
2197 | job = self.create(self.pakfire, self.build, self.arch, type="test") | |
2198 | job.set_start_time(start_time) | |
2199 | ||
2200 | # Log the event. | |
2201 | self.log("schedule_test_job", test_job=job, user=user) | |
2202 | ||
2203 | return job | |
2204 | ||
2205 | def schedule_test(self, start_not_before=None, user=None): | |
2206 | # XXX to be removed | |
2207 | return self.schedule("test", start_time=start_not_before, user=user) | |
2208 | ||
2209 | def schedule_rebuild(self, start_not_before=None, user=None): | |
2210 | # XXX to be removed | |
2211 | return self.schedule("rebuild", start_time=start_not_before, user=user) | |
2212 | ||
2213 | def get_build_repos(self): | |
2214 | """ | |
2215 | Returns a list of all repositories that should be used when | |
2216 | building this job. | |
2217 | """ | |
2218 | repo_ids = self.db.query("SELECT repo_id FROM jobs_repos WHERE job_id = %s", | |
2219 | self.id) | |
2220 | ||
2221 | if not repo_ids: | |
2222 | return self.distro.get_build_repos() | |
2223 | ||
2224 | repos = [] | |
2225 | for repo in self.distro.repositories: | |
2226 | if repo.id in [r.id for r in repo_ids]: | |
2227 | repos.append(repo) | |
2228 | ||
2229 | return repos or self.distro.get_build_repos() | |
2230 | ||
2231 | def get_repo_config(self): | |
2232 | """ | |
2233 | Get repository configuration file that is sent to the builder. | |
2234 | """ | |
2235 | confs = [] | |
2236 | ||
2237 | for repo in self.get_build_repos(): | |
2238 | confs.append(repo.get_conf()) | |
2239 | ||
2240 | return "\n\n".join(confs) | |
2241 | ||
2242 | def get_config(self): | |
2243 | """ | |
2244 | Get configuration file that is sent to the builder. | |
2245 | """ | |
2246 | confs = [] | |
2247 | ||
2248 | # Add the distribution configuration. | |
2249 | confs.append(self.distro.get_config()) | |
2250 | ||
2251 | # Then add all repositories for this build. | |
2252 | confs.append(self.get_repo_config()) | |
2253 | ||
2254 | return "\n\n".join(confs) | |
2255 | ||
	def used_by(self):
		"""
		Find all jobs that had any of this job's packages in their
		buildroot.

		NOTE(review): this method looks unfinished - it prints the
		matching job IDs (Python 2 print statement) instead of
		returning them, so callers only ever get [] or None. Confirm
		intent before relying on it.
		"""
		if not self.packages:
			return []

		# One "pkg_uuid = %s" condition per package of this job.
		conditions = []
		args = []

		for pkg in self.packages:
			conditions.append(" pkg_uuid = %s")
			args.append(pkg.uuid)

		query = "SELECT DISTINCT job_id AS id FROM jobs_buildroots"
		query += " WHERE %s" % " OR ".join(conditions)

		job_ids = self.db.query(query, *args)

		# Debug output only; the result is never returned.
		print job_ids
2273 | ||
	def resolvdep(self):
		"""
		Try to resolve the build dependencies of this job's source
		package and update the job state accordingly.
		"""
		config = pakfire.config.Config(files=["general.conf"])
		config.parse(self.get_config())

		# The filename of the source file.
		filename = os.path.join(PACKAGES_DIR, self.build.pkg.path)
		assert os.path.exists(filename), filename

		# Create a new pakfire instance with the configuration for
		# this build.
		p = pakfire.PakfireServer(config=config, arch=self.arch.name)

		# Try to solve the build dependencies.
		try:
			solver = p.resolvdep(filename)

		# Catch dependency errors and log the problem string.
		# NOTE(review): DependencyError is not imported explicitly in
		# the visible part of this file - presumably it comes in via
		# "from constants import *" or the pakfire imports; verify.
		except DependencyError, e:
			self.state = "dependency_error"
			self.update_message(e)

		else:
			# If the build dependencies can be resolved, we set the build in
			# pending state.
			if solver.status is True:
				# Do not resurrect jobs that have already failed.
				if self.state in ("failed",):
					return

				self.state = "pending"