#!/usr/bin/python

import logging
import os.path

import pakfire

log = logging.getLogger("repositories")
log.propagate = 1

from . import base
from . import logs

from .constants import *
from .decorators import *

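# This module implements the repository layer of the build service:
#
#   Repositories  - the collection of all repositories in the database
#   Repository    - a single repository that builds are added to, removed
#                   from and that gets remastered on disk
#   RepositoryAux - an auxiliary repository that is defined only by a static
#                   URL and merely shows up in the generated configuration
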
class Repositories(base.Object):
    def _get_repository(self, query, *args):
        res = self.db.get(query, *args)

        if res:
            return Repository(self.backend, res.id, data=res)

    def _get_repositories(self, query, *args):
        res = self.db.query(query, *args)

        for row in res:
            yield Repository(self.backend, row.id, data=row)

    def __iter__(self):
        repositories = self._get_repositories("SELECT * FROM repositories \
            WHERE deleted IS FALSE ORDER BY distro_id, name")

        return iter(repositories)

    def create(self, distro, name, description):
        return self._get_repository("INSERT INTO repositories(distro_id, name, description) \
            VALUES(%s, %s, %s) RETURNING *", distro.id, name, description)

    def get_by_id(self, repo_id):
        return self._get_repository("SELECT * FROM repositories \
            WHERE id = %s", repo_id)

    def get_history(self, limit=None, offset=None, build=None, repo=None, user=None):
        # NOTE: The build, repo and user arguments are accepted but are not
        # (yet) used to filter the result.
        query = "SELECT * FROM repositories_history"
        args = []

        query += " ORDER BY time DESC"

        if limit:
            if offset:
                # PostgreSQL paging syntax
                query += " LIMIT %s OFFSET %s"
                args += [limit, offset]
            else:
                query += " LIMIT %s"
                args += [limit]

        entries = []
        for entry in self.db.query(query, *args):
            entry = logs.RepositoryLogEntry(self.pakfire, entry)
            entries.append(entry)

        return entries

    def remaster(self):
        """
        Remasters all repositories
        """
        for repo in self:
            # Skip all repositories that don't need an update
            if not repo.needs_update:
                log.debug("Repository %s does not need an update" % repo)
                continue

            with self.db.transaction():
                repo.remaster()

    def cleanup(self):
        """
        Cleans up all repositories
        """
        for repo in self:
            with self.db.transaction():
                repo.cleanup()

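# A minimal usage sketch (the backend object that exposes this collection as
# "backend.repos" as well as "distro", "build" and "user" come from other
# parts of the build service and are only placeholders here):
#
#   repo = backend.repos.create(distro, "Testing", "Packages that are being tested")
#   repo.add_build(build, user=user)
#   backend.repos.remaster()
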
class Repository(base.DataObject):
    table = "repositories"

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.id == other.id

    def __lt__(self, other):
        if isinstance(other, self.__class__):
            return self.parent_id == other.id

    def __iter__(self):
        builds = self.backend.builds._get_builds("SELECT builds.* FROM repositories_builds \
            LEFT JOIN builds ON repositories_builds.build_id = builds.id \
            WHERE repositories_builds.repo_id = %s", self.id)

        return iter(builds)

    def __len__(self):
        res = self.db.get("SELECT COUNT(*) AS len FROM repositories_builds \
            WHERE repo_id = %s", self.id)

        return res.len

    def __nonzero__(self):
        return True

    @lazy_property
    def next(self):
        return self.backend.repos._get_repository("SELECT * FROM repositories \
            WHERE parent_id = %s", self.id)

    @lazy_property
    def parent(self):
        if self.data.parent_id:
            return self.backend.repos._get_repository("SELECT * FROM repositories \
                WHERE id = %s", self.data.parent_id)

    @lazy_property
    def distro(self):
        return self.backend.distros.get_by_id(self.data.distro_id)

    def set_priority(self, priority):
        self._set_attribute("priority", priority)

    priority = property(lambda s: s.data.priority, set_priority)

    def get_user(self):
        if self.data.user_id:
            return self.backend.users.get_by_id(self.data.user_id)

    def set_user(self, user):
        self._set_attribute("user_id", user.id)

    user = property(get_user, set_user)

    @property
    def info(self):
        return {
            "id" : self.id,
            "distro" : self.distro.info,
            "name" : self.name,
            "arches" : self.arches,
        }

    @property
    def basepath(self):
        return "/".join((
            self.distro.identifier,
            self.identifier,
        ))

    @property
    def path(self):
        return os.path.join(REPOS_DIR, self.basepath)

    @property
    def url(self):
        return os.path.join(
            self.settings.get("baseurl", "https://pakfire.ipfire.org"),
            "repositories",
            self.basepath,
        )

    @property
    def mirrorlist(self):
        return os.path.join(
            self.settings.get("baseurl", "https://pakfire.ipfire.org"),
            "distro", self.distro.identifier,
            "repo", self.identifier,
            "mirrorlist?arch=%{arch}"
        )

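    # For illustration (hypothetical identifiers - a distribution "ipfire3"
    # with a repository named "Stable"), the properties above resolve to
    # roughly:
    #
    #   basepath:   ipfire3/stable
    #   path:       <REPOS_DIR>/ipfire3/stable
    #   url:        https://pakfire.ipfire.org/repositories/ipfire3/stable
    #   mirrorlist: https://pakfire.ipfire.org/distro/ipfire3/repo/stable/mirrorlist?arch=%{arch}
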
    def get_conf(self, local=False):
        lines = [
            "[repo:%s]" % self.identifier,
            "description = %s - %s" % (self.distro.name, self.summary),
            "enabled = 1",
            # %%{arch} keeps a literal %{arch} placeholder in the output
            "baseurl = %s/%%{arch}" % (self.path if local else self.url),
        ]

        if self.mirrored and not local:
            lines.append("mirrors = %s" % self.mirrorlist)

        if self.priority:
            lines.append("priority = %s" % self.priority)

        return "\n".join(lines)

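    # A snippet generated by get_conf() looks roughly like this (hypothetical
    # values, continuing the example above; the mirrors and priority lines
    # only appear when mirroring is enabled and a priority is set):
    #
    #   [repo:stable]
    #   description = IPFire 3.x - Stable updates
    #   enabled = 1
    #   baseurl = https://pakfire.ipfire.org/repositories/ipfire3/stable/%{arch}
    #   mirrors = https://pakfire.ipfire.org/distro/ipfire3/repo/stable/mirrorlist?arch=%{arch}
    #   priority = 50
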
    @property
    def name(self):
        return self.data.name

    @property
    def identifier(self):
        return self.name.lower()

    @property
    def type(self):
        return self.data.type

    @property
    def summary(self):
        lines = self.description.splitlines()

        if lines:
            return lines[0]

        return "N/A"

    @property
    def description(self):
        return self.data.description or ""

    @property
    def parent_id(self):
        return self.data.parent_id

    @lazy_property
    def key(self):
        if not self.data.key_id:
            return

        return self.pakfire.keys.get_by_id(self.data.key_id)

    @property
    def arches(self):
        return self.distro.arches + ["src"]

    def set_mirrored(self, mirrored):
        self._set_attribute("mirrored", mirrored)

    mirrored = property(lambda s: s.data.mirrored, set_mirrored)

    def set_enabled_for_builds(self, state):
        self._set_attribute("enabled_for_builds", state)

    enabled_for_builds = property(lambda s: s.data.enabled_for_builds, set_enabled_for_builds)

    @property
    def score_needed(self):
        return self.data.score_needed

    @property
    def time_min(self):
        return self.data.time_min

    @property
    def time_max(self):
        return self.data.time_max

    def set_update_forced(self, update_forced):
        self._set_attribute("update_forced", update_forced)

    update_forced = property(lambda s: s.data.update_forced, set_update_forced)

    def _log_build(self, action, build, from_repo=None, to_repo=None, user=None):
        user_id = None
        if user:
            user_id = user.id

        from_repo_id = None
        if from_repo:
            from_repo_id = from_repo.id

        to_repo_id = None
        if to_repo:
            to_repo_id = to_repo.id

        self.db.execute("INSERT INTO repositories_history(action, build_id, from_repo_id, to_repo_id, user_id, time) \
            VALUES(%s, %s, %s, %s, %s, NOW())", action, build.id, from_repo_id, to_repo_id, user_id)

    def add_build(self, build, user=None, log=True):
        self.db.execute("INSERT INTO repositories_builds(repo_id, build_id, time_added)"
            " VALUES(%s, %s, NOW())", self.id, build.id)

        # Update bug status.
        build._update_bugs_helper(self)

        if log:
            self._log_build("added", build, to_repo=self, user=user)

    def rem_build(self, build, user=None, log=True):
        self.db.execute("DELETE FROM repositories_builds \
            WHERE repo_id = %s AND build_id = %s", self.id, build.id)

        # Force regenerating the index
        self.update_forced = True

        if log:
            self._log_build("removed", build, from_repo=self, user=user)

    def move_build(self, build, to_repo, user=None, log=True):
        self.db.execute("UPDATE repositories_builds SET repo_id = %s, time_added = NOW() \
            WHERE repo_id = %s AND build_id = %s", to_repo.id, self.id, build.id)

        # Force regenerating the index
        self.update_forced = True

        # Update bug status.
        build._update_bugs_helper(to_repo)

        if log:
            self._log_build("moved", build, from_repo=self, to_repo=to_repo,
                user=user)

    def get_builds(self, limit=None, offset=None):
        query = "SELECT build_id AS id FROM repositories_builds \
            WHERE repo_id = %s ORDER BY time_added DESC"
        args = [self.id]

        if limit:
            if offset:
                # PostgreSQL paging syntax
                query += " LIMIT %s OFFSET %s"
                args += [limit, offset]
            else:
                query += " LIMIT %s"
                args += [limit]

        _builds = []
        for build in self.db.query(query, *args):
            build = self.pakfire.builds.get_by_id(build.id)
            build._repo = self

            _builds.append(build)

        return _builds

    def get_packages(self, arch):
        if arch == "src":
            return self.backend.packages._get_packages("SELECT packages.* FROM repositories_builds \
                LEFT JOIN builds ON repositories_builds.build_id = builds.id \
                LEFT JOIN packages ON builds.pkg_id = packages.id \
                WHERE repositories_builds.repo_id = %s", self.id)

        return self.backend.packages._get_packages("SELECT packages.* FROM repositories_builds \
            LEFT JOIN builds ON repositories_builds.build_id = builds.id \
            LEFT JOIN jobs ON builds.id = jobs.build_id \
            LEFT JOIN jobs_packages ON jobs.id = jobs_packages.job_id \
            LEFT JOIN packages ON jobs_packages.pkg_id = packages.id \
            WHERE repositories_builds.repo_id = %s \
                AND (jobs.arch = %s OR jobs.arch = %s) \
                AND (packages.arch = %s OR packages.arch = %s)",
            self.id, arch, "noarch", arch, "noarch")

    @property
    def unpushed_builds(self):
        return self.backend.builds._get_builds("SELECT builds.* FROM repositories \
            LEFT JOIN repositories_builds ON repositories.id = repositories_builds.repo_id \
            LEFT JOIN builds ON repositories_builds.build_id = builds.id \
            WHERE repositories.id = %s \
                AND repositories_builds.time_added >= repositories.last_update", self.id)

    def get_obsolete_builds(self):
        return self.pakfire.builds.get_obsolete(self)

    @property
    def needs_update(self):
        if self.unpushed_builds:
            return True

        return False

    def updated(self):
        self.db.execute("UPDATE repositories SET last_update = NOW() \
            WHERE id = %s", self.id)

        # Reset forced update flag
        self.update_forced = False

    def remaster(self):
        log.info("Going to update repository %s..." % self.name)

        for arch in self.arches:
            changed = False

            repo_path = os.path.join(self.path, arch)
            log.debug(" Path: %s" % repo_path)

            if not os.path.exists(repo_path):
                os.makedirs(repo_path)

            # Get all packages that are to be included in this repository
            packages = []
            for p in self.get_packages(arch):
                path = os.path.join(repo_path, p.filename)
                packages.append(path)

                # Nothing to do if the package already exists
                if os.path.exists(path):
                    continue

                # Copy the package into the repository
                log.info("Adding %s..." % p)
                p.copy(repo_path)

                # XXX need to sign the new package here

                # The repository has been changed
                changed = True

            # No need to regenerate the index if the repository hasn't changed
            if not changed and not self.update_forced:
                continue

            # Find the key to sign the package.
            key_id = None
            if self.key:
                key_id = self.key.fingerprint

            # Create package index.
            p = pakfire.PakfireServer(arch=arch)
            p.repo_create(repo_path, packages,
                name="%s - %s.%s" % (self.distro.name, self.name, arch),
                key_id=key_id)

        # Update the timestamp of the last remaster
        self.updated()

    def cleanup(self):
        log.info("Cleaning up repository %s..." % self.name)

        for arch in self.arches:
            repo_path = os.path.join(self.path, arch)

            # Get a list of all files in the repository directory right now
            filelist = [e for e in os.listdir(repo_path)
                if os.path.isfile(os.path.join(repo_path, e))]

            # Get a list of all packages that should be in the repository
            # and remove them from the filelist
            for p in self.get_packages(arch):
                try:
                    filelist.remove(p.filename)
                except ValueError:
                    pass

            # Any files that do not belong in the repository
            # any more are simply deleted
            for filename in filelist:
                path = os.path.join(repo_path, filename)
                self.backend.delete_file(path)

    def get_history(self, **kwargs):
        kwargs.update({
            "repo" : self,
        })

        return self.pakfire.repos.get_history(**kwargs)

    def get_build_times(self):
        times = []
        for arch in self.arches:
            time = self.db.get("SELECT SUM(jobs.time_finished - jobs.time_started) AS time FROM jobs \
                JOIN builds ON builds.id = jobs.build_id \
                JOIN repositories_builds ON builds.id = repositories_builds.build_id \
                WHERE (jobs.arch = %s OR jobs.arch = %s) AND \
                    jobs.type = 'build' AND \
                    repositories_builds.repo_id = %s", arch, "noarch", self.id)

            times.append((arch, time.time.total_seconds()))

        return times


class RepositoryAux(base.DataObject):
    table = "repositories_aux"

    @property
    def name(self):
        return self.data.name

    @property
    def description(self):
        return self.data.description or ""

    @property
    def url(self):
        return self.data.url

    @property
    def identifier(self):
        return self.name.lower()

    @property
    def distro(self):
        return self.pakfire.distros.get_by_id(self.data.distro_id)

    def get_conf(self, local=False):
        lines = [
            "[repo:%s]" % self.identifier,
            "description = %s - %s" % (self.distro.name, self.name),
            "enabled = 1",
            "baseurl = %s" % self.url,
            "priority = 0",
        ]

        return "\n".join(lines)