/Makefile
/build-aux
+/intltool-*
/libtool
/missing
/contrib/pakfire.nm
src/pakfire/i18n.py \
src/pakfire/keyring.py \
src/pakfire/logger.py \
- src/pakfire/lzma.py \
src/pakfire/progressbar.py \
src/pakfire/satsolver.py \
src/pakfire/server.py \
# ------------------------------------------------------------------------------
-pkgpyexec_LTLIBRARIES += \
- _lzma.la
-
-_lzma_la_SOURCES = \
- src/_lzma/_lzmamodule.c
-
-_lzma_la_CFLAGS = \
- $(AM_CFLAGS) \
- $(PYTHON_DEVEL_CFLAGS) \
- $(LZMA_CFLAGS)
-
-_lzma_la_LDFLAGS = \
- $(AM_LDFLAGS) \
- -shared \
- -module \
- -avoid-version
-
-_lzma_la_LIBADD = \
- $(PYTHON_DEVEL_LIBS) \
- $(LZMA_LIBS)
-
-# ------------------------------------------------------------------------------
-
pkgpyexec_LTLIBRARIES += \
_pakfire.la
AC_SUBST([OUR_LDFLAGS], $with_ldflags)
# Python
-AM_PATH_PYTHON([2.7])
+AM_PATH_PYTHON([3.4])
save_LIBS="$LIBS"
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2013-10-26 21:51+0200\n"
+"POT-Creation-Date: 2016-11-26 17:51+0100\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
msgid "%s has got no valid signatures"
msgstr ""
-#: ../src/pakfire/actions.py:234
+#: ../src/pakfire/actions.py:235
msgid "Could not handle scriptlet of unknown type. Skipping."
msgstr ""
-#: ../src/pakfire/actions.py:237
+#: ../src/pakfire/actions.py:238
msgid "Executing scriptlet..."
msgstr ""
-#: ../src/pakfire/actions.py:243
+#: ../src/pakfire/actions.py:244
#, python-format
msgid "Cannot run scriptlet because no interpreter is available: %s"
msgstr ""
-#: ../src/pakfire/actions.py:247
+#: ../src/pakfire/actions.py:248
#, python-format
msgid "Cannot run scriptlet because the interpreter is not executable: %s"
msgstr ""
-#: ../src/pakfire/actions.py:286
+#: ../src/pakfire/actions.py:287
#, python-format
msgid ""
"The scriptlet returned an error:\n"
"%s"
msgstr ""
-#: ../src/pakfire/actions.py:289
+#: ../src/pakfire/actions.py:290
#, python-format
msgid "The scriptlet ran more than %s seconds and was killed."
msgstr ""
-#: ../src/pakfire/actions.py:293
+#: ../src/pakfire/actions.py:294
#, python-format
msgid ""
"The scriptlet returned with an unhandled error:\n"
#. This functions creates a fork with then chroots into the
#. pakfire root if necessary and then compiles the given scriptlet
#. code and runs it.
-#: ../src/pakfire/actions.py:307
+#: ../src/pakfire/actions.py:308
msgid "Executing python scriptlet..."
msgstr ""
-#: ../src/pakfire/actions.py:332
+#: ../src/pakfire/actions.py:333
#, python-format
msgid "Exception occured: %s"
msgstr ""
-#: ../src/pakfire/actions.py:405 ../src/pakfire/actions.py:452
-#: ../src/pakfire/actions.py:462 ../src/pakfire/actions.py:483
+#: ../src/pakfire/actions.py:406 ../src/pakfire/actions.py:453
+#: ../src/pakfire/actions.py:463 ../src/pakfire/actions.py:484
#, python-format
msgid "Running transaction test for %s"
msgstr ""
-#: ../src/pakfire/actions.py:415
+#: ../src/pakfire/actions.py:416
msgid "Reinstalling"
msgstr ""
-#: ../src/pakfire/actions.py:417
+#: ../src/pakfire/actions.py:418
msgid "Updating"
msgstr ""
-#: ../src/pakfire/actions.py:419
+#: ../src/pakfire/actions.py:420
msgid "Downgrading"
msgstr ""
-#: ../src/pakfire/actions.py:421
+#: ../src/pakfire/actions.py:422
msgid "Installing"
msgstr ""
-#: ../src/pakfire/actions.py:469
+#: ../src/pakfire/actions.py:470
msgid "Cleanup"
msgstr ""
-#: ../src/pakfire/actions.py:471
+#: ../src/pakfire/actions.py:472
msgid "Removing"
msgstr ""
msgid "Extracting"
msgstr ""
-#: ../src/pakfire/builder.py:781
+#: ../src/pakfire/builder.py:779
msgid "You cannot run a build when no package was given."
msgstr ""
-#: ../src/pakfire/builder.py:785
+#: ../src/pakfire/builder.py:783
#, python-format
msgid "Could not find makefile in build root: %s"
msgstr ""
-#: ../src/pakfire/builder.py:815
+#: ../src/pakfire/builder.py:813
msgid "Build failed"
msgstr ""
-#: ../src/pakfire/builder.py:818
+#: ../src/pakfire/builder.py:816
msgid "Build interrupted"
msgstr ""
-#: ../src/pakfire/builder.py:824
+#: ../src/pakfire/builder.py:822
msgid "Build failed."
msgstr ""
#. End here in case of an error.
-#: ../src/pakfire/builder.py:840
+#: ../src/pakfire/builder.py:838
msgid "The build command failed. See logfile for details."
msgstr ""
-#: ../src/pakfire/builder.py:843
+#: ../src/pakfire/builder.py:841
msgid "Running installation test..."
msgstr ""
-#: ../src/pakfire/builder.py:849
+#: ../src/pakfire/builder.py:847
msgid "Installation test succeeded."
msgstr ""
#. Create a progressbar.
-#: ../src/pakfire/builder.py:892
+#: ../src/pakfire/builder.py:890
msgid "Signing packages..."
msgstr ""
-#: ../src/pakfire/builder.py:926
+#: ../src/pakfire/builder.py:924
msgid "Dumping package information:"
msgstr ""
#. Package the result.
#. Make all these little package from the build environment.
-#: ../src/pakfire/builder.py:1078
+#: ../src/pakfire/builder.py:1076
msgid "Creating packages:"
msgstr ""
#. Execute the buildscript of this stage.
-#: ../src/pakfire/builder.py:1092
+#: ../src/pakfire/builder.py:1090
#, python-format
msgid "Running stage %s:"
msgstr ""
-#: ../src/pakfire/builder.py:1110
+#: ../src/pakfire/builder.py:1108
#, python-format
msgid "Could not remove static libraries: %s"
msgstr ""
-#: ../src/pakfire/builder.py:1116
+#: ../src/pakfire/builder.py:1114
msgid "Compressing man pages did not complete successfully."
msgstr ""
-#: ../src/pakfire/builder.py:1136
+#: ../src/pakfire/builder.py:1134
msgid "Extracting debuginfo did not complete with success. Aborting build."
msgstr ""
msgid "Not set"
msgstr ""
-#: ../src/pakfire/packages/base.py:570
+#: ../src/pakfire/packages/base.py:573
#, python-format
msgid "Config file saved as %s."
msgstr ""
-#: ../src/pakfire/packages/base.py:575
+#: ../src/pakfire/packages/base.py:578
#, python-format
msgid "Preserving datafile '/%s'"
msgstr ""
msgid " Solutions:"
msgstr ""
-#: ../src/pakfire/server.py:279 ../src/pakfire/system.py:149
+#: ../src/pakfire/server.py:279 ../src/pakfire/system.py:153
msgid "Could not be determined"
msgstr ""
+++ /dev/null
-/* _lzma - Low-level Python interface to liblzma.
-
- Initial implementation by Per Øyvind Karlsen.
- Rewritten by Nadeem Vawda.
-
-*/
-
-#define PY_SSIZE_T_CLEAN
-
-#include "Python.h"
-#include "structmember.h"
-#ifdef WITH_THREAD
-#include "pythread.h"
-#endif
-
-#include <stdarg.h>
-#include <string.h>
-
-#include <lzma.h>
-
-
-#ifndef PY_LONG_LONG
-#error "This module requires PY_LONG_LONG to be defined"
-#endif
-
-
-#ifdef WITH_THREAD
-#define ACQUIRE_LOCK(obj) do { \
- if (!PyThread_acquire_lock((obj)->lock, 0)) { \
- Py_BEGIN_ALLOW_THREADS \
- PyThread_acquire_lock((obj)->lock, 1); \
- Py_END_ALLOW_THREADS \
- } } while (0)
-#define RELEASE_LOCK(obj) PyThread_release_lock((obj)->lock)
-#else
-#define ACQUIRE_LOCK(obj)
-#define RELEASE_LOCK(obj)
-#endif
-
-
-/* Container formats: */
-enum {
- FORMAT_AUTO,
- FORMAT_XZ,
- FORMAT_ALONE,
- FORMAT_RAW,
-};
-
-#define LZMA_CHECK_UNKNOWN (LZMA_CHECK_ID_MAX + 1)
-
-
-typedef struct {
- PyObject_HEAD
- lzma_stream lzs;
- int flushed;
-#ifdef WITH_THREAD
- PyThread_type_lock lock;
-#endif
-} Compressor;
-
-typedef struct {
- PyObject_HEAD
- lzma_stream lzs;
- int check;
- char eof;
- PyObject *unused_data;
-#ifdef WITH_THREAD
- PyThread_type_lock lock;
-#endif
-} Decompressor;
-
-/* LZMAError class object. */
-static PyObject *Error;
-
-/* An empty tuple, used by the filter specifier parsing code. */
-static PyObject *empty_tuple;
-
-
-/* Helper functions. */
-
-static int
-catch_lzma_error(lzma_ret lzret)
-{
- switch (lzret) {
- case LZMA_OK:
- case LZMA_GET_CHECK:
- case LZMA_NO_CHECK:
- case LZMA_STREAM_END:
- return 0;
- case LZMA_UNSUPPORTED_CHECK:
- PyErr_SetString(Error, "Unsupported integrity check");
- return 1;
- case LZMA_MEM_ERROR:
- PyErr_NoMemory();
- return 1;
- case LZMA_MEMLIMIT_ERROR:
- PyErr_SetString(Error, "Memory usage limit exceeded");
- return 1;
- case LZMA_FORMAT_ERROR:
- PyErr_SetString(Error, "Input format not supported by decoder");
- return 1;
- case LZMA_OPTIONS_ERROR:
- PyErr_SetString(Error, "Invalid or unsupported options");
- return 1;
- case LZMA_DATA_ERROR:
- PyErr_SetString(Error, "Corrupt input data");
- return 1;
- case LZMA_BUF_ERROR:
- PyErr_SetString(Error, "Insufficient buffer space");
- return 1;
- case LZMA_PROG_ERROR:
- PyErr_SetString(Error, "Internal error");
- return 1;
- default:
- PyErr_Format(Error, "Unrecognized error from liblzma: %d", lzret);
- return 1;
- }
-}
-
-#if BUFSIZ < 8192
-#define INITIAL_BUFFER_SIZE 8192
-#else
-#define INITIAL_BUFFER_SIZE BUFSIZ
-#endif
-
-static int
-grow_buffer(PyObject **buf)
-{
- size_t size = PyBytes_GET_SIZE(*buf);
- return _PyBytes_Resize(buf, size + (size >> 3) + 6);
-}
-
-
-/* Some custom type conversions for PyArg_ParseTupleAndKeywords(),
- since the predefined conversion specifiers do not suit our needs:
-
- uint32_t - the "I" (unsigned int) specifier is the right size, but
- silently ignores overflows on conversion.
-
- lzma_mode and lzma_match_finder - these are enumeration types, and
- so the size of each is implementation-defined. Worse, different
- enum types can be of different sizes within the same program, so
- to be strictly correct, we need to define two separate converters.
- */
-
-#define INT_TYPE_CONVERTER_FUNC(TYPE, FUNCNAME) \
- static int \
- FUNCNAME(PyObject *obj, void *ptr) \
- { \
- unsigned long val; \
- \
- val = PyLong_AsUnsignedLong(obj); \
- if (PyErr_Occurred()) \
- return 0; \
- if ((unsigned long)(TYPE)val != val) { \
- PyErr_SetString(PyExc_OverflowError, \
- "Value too large for " #TYPE " type"); \
- return 0; \
- } \
- *(TYPE *)ptr = val; \
- return 1; \
- }
-
-INT_TYPE_CONVERTER_FUNC(uint32_t, uint32_converter)
-INT_TYPE_CONVERTER_FUNC(lzma_mode, lzma_mode_converter)
-INT_TYPE_CONVERTER_FUNC(lzma_match_finder, lzma_mf_converter)
-
-#undef INT_TYPE_CONVERTER_FUNC
-
-
-/* Filter specifier parsing functions. */
-
-static void *
-parse_filter_spec_lzma(PyObject *spec)
-{
- static char *optnames[] = {"id", "preset", "dict_size", "lc", "lp",
- "pb", "mode", "nice_len", "mf", "depth", NULL};
- PyObject *id;
- PyObject *preset_obj;
- uint32_t preset = LZMA_PRESET_DEFAULT;
- lzma_options_lzma *options;
-
- /* First, fill in default values for all the options using a preset.
- Then, override the defaults with any values given by the caller. */
-
- preset_obj = PyMapping_GetItemString(spec, "preset");
- if (preset_obj == NULL) {
- if (PyErr_ExceptionMatches(PyExc_KeyError))
- PyErr_Clear();
- else
- return NULL;
- } else {
- int ok = uint32_converter(preset_obj, &preset);
- Py_DECREF(preset_obj);
- if (!ok)
- return NULL;
- }
-
- options = (lzma_options_lzma *)PyMem_Malloc(sizeof *options);
- if (options == NULL)
- return PyErr_NoMemory();
- memset(options, 0, sizeof *options);
-
- if (lzma_lzma_preset(options, preset)) {
- PyMem_Free(options);
- PyErr_Format(Error, "lzma_lzma_preset() failed for preset %#x", preset);
- return NULL;
- }
-
- if (!PyArg_ParseTupleAndKeywords(empty_tuple, spec,
- "|OOO&O&O&O&O&O&O&O&", optnames,
- &id, &preset_obj,
- uint32_converter, &options->dict_size,
- uint32_converter, &options->lc,
- uint32_converter, &options->lp,
- uint32_converter, &options->pb,
- lzma_mode_converter, &options->mode,
- uint32_converter, &options->nice_len,
- lzma_mf_converter, &options->mf,
- uint32_converter, &options->depth)) {
- PyErr_SetString(PyExc_ValueError,
- "Invalid filter specifier for LZMA filter");
- PyMem_Free(options);
- options = NULL;
- }
- return options;
-}
-
-static void *
-parse_filter_spec_delta(PyObject *spec)
-{
- static char *optnames[] = {"id", "dist", NULL};
- PyObject *id;
- uint32_t dist = 1;
- lzma_options_delta *options;
-
- if (!PyArg_ParseTupleAndKeywords(empty_tuple, spec, "|OO&", optnames,
- &id, uint32_converter, &dist)) {
- PyErr_SetString(PyExc_ValueError,
- "Invalid filter specifier for delta filter");
- return NULL;
- }
-
- options = (lzma_options_delta *)PyMem_Malloc(sizeof *options);
- if (options == NULL)
- return PyErr_NoMemory();
- memset(options, 0, sizeof *options);
- options->type = LZMA_DELTA_TYPE_BYTE;
- options->dist = dist;
- return options;
-}
-
-static void *
-parse_filter_spec_bcj(PyObject *spec)
-{
- static char *optnames[] = {"id", "start_offset", NULL};
- PyObject *id;
- uint32_t start_offset = 0;
- lzma_options_bcj *options;
-
- if (!PyArg_ParseTupleAndKeywords(empty_tuple, spec, "|OO&", optnames,
- &id, uint32_converter, &start_offset)) {
- PyErr_SetString(PyExc_ValueError,
- "Invalid filter specifier for BCJ filter");
- return NULL;
- }
-
- options = (lzma_options_bcj *)PyMem_Malloc(sizeof *options);
- if (options == NULL)
- return PyErr_NoMemory();
- memset(options, 0, sizeof *options);
- options->start_offset = start_offset;
- return options;
-}
-
-static void *
-parse_filter_spec(lzma_filter *f, PyObject *spec)
-{
- PyObject *id_obj;
-
- if (!PyMapping_Check(spec)) {
- PyErr_SetString(PyExc_TypeError,
- "Filter specifier must be a dict or dict-like object");
- return NULL;
- }
- id_obj = PyMapping_GetItemString(spec, "id");
- if (id_obj == NULL) {
- if (PyErr_ExceptionMatches(PyExc_KeyError))
- PyErr_SetString(PyExc_ValueError,
- "Filter specifier must have an \"id\" entry");
- return NULL;
- }
- f->id = PyLong_AsUnsignedLongLong(id_obj);
- Py_DECREF(id_obj);
- if (PyErr_Occurred())
- return NULL;
-
- switch (f->id) {
- case LZMA_FILTER_LZMA1:
- case LZMA_FILTER_LZMA2:
- f->options = parse_filter_spec_lzma(spec);
- return f->options;
- case LZMA_FILTER_DELTA:
- f->options = parse_filter_spec_delta(spec);
- return f->options;
- case LZMA_FILTER_X86:
- case LZMA_FILTER_POWERPC:
- case LZMA_FILTER_IA64:
- case LZMA_FILTER_ARM:
- case LZMA_FILTER_ARMTHUMB:
- case LZMA_FILTER_SPARC:
- f->options = parse_filter_spec_bcj(spec);
- return f->options;
- default:
- PyErr_Format(PyExc_ValueError, "Invalid filter ID: %llu", f->id);
- return NULL;
- }
-}
-
-static void
-free_filter_chain(lzma_filter filters[])
-{
- int i;
-
- for (i = 0; filters[i].id != LZMA_VLI_UNKNOWN; i++)
- PyMem_Free(filters[i].options);
-}
-
-static int
-parse_filter_chain_spec(lzma_filter filters[], PyObject *filterspecs)
-{
- Py_ssize_t i, num_filters;
-
- num_filters = PySequence_Length(filterspecs);
- if (num_filters == -1)
- return -1;
- if (num_filters > LZMA_FILTERS_MAX) {
- PyErr_Format(PyExc_ValueError,
- "Too many filters - liblzma supports a maximum of %d",
- LZMA_FILTERS_MAX);
- return -1;
- }
-
- for (i = 0; i < num_filters; i++) {
- int ok = 1;
- PyObject *spec = PySequence_GetItem(filterspecs, i);
- if (spec == NULL || parse_filter_spec(&filters[i], spec) == NULL)
- ok = 0;
- Py_XDECREF(spec);
- if (!ok) {
- filters[i].id = LZMA_VLI_UNKNOWN;
- free_filter_chain(filters);
- return -1;
- }
- }
- filters[num_filters].id = LZMA_VLI_UNKNOWN;
- return 0;
-}
-
-
-/* LZMACompressor class. */
-
-static PyObject *
-compress(Compressor *c, uint8_t *data, size_t len, lzma_action action)
-{
- size_t data_size = 0;
- PyObject *result;
-
- result = PyBytes_FromStringAndSize(NULL, INITIAL_BUFFER_SIZE);
- if (result == NULL)
- return NULL;
- c->lzs.next_in = data;
- c->lzs.avail_in = len;
- c->lzs.next_out = (uint8_t *)PyBytes_AS_STRING(result);
- c->lzs.avail_out = PyBytes_GET_SIZE(result);
- for (;;) {
- lzma_ret lzret;
-
- Py_BEGIN_ALLOW_THREADS
- lzret = lzma_code(&c->lzs, action);
- data_size = (char *)c->lzs.next_out - PyBytes_AS_STRING(result);
- Py_END_ALLOW_THREADS
- if (catch_lzma_error(lzret))
- goto error;
- if ((action == LZMA_RUN && c->lzs.avail_in == 0) ||
- (action == LZMA_FINISH && lzret == LZMA_STREAM_END)) {
- break;
- } else if (c->lzs.avail_out == 0) {
- if (grow_buffer(&result) == -1)
- goto error;
- c->lzs.next_out = (uint8_t *)PyBytes_AS_STRING(result) + data_size;
- c->lzs.avail_out = PyBytes_GET_SIZE(result) - data_size;
- }
- }
- if (data_size != PyBytes_GET_SIZE(result))
- if (_PyBytes_Resize(&result, data_size) == -1)
- goto error;
- return result;
-
-error:
- Py_XDECREF(result);
- return NULL;
-}
-
-PyDoc_STRVAR(Compressor_compress_doc,
-"compress(data) -> bytes\n"
-"\n"
-"Provide data to the compressor object. Returns a chunk of\n"
-"compressed data if possible, or b\"\" otherwise.\n"
-"\n"
-"When you have finished providing data to the compressor, call the\n"
-"flush() method to finish the conversion process.\n");
-
-static PyObject *
-Compressor_compress(Compressor *self, PyObject *args)
-{
- Py_buffer buffer;
- PyObject *result = NULL;
-
- if (!PyArg_ParseTuple(args, "s*:compress", &buffer))
- return NULL;
-
- ACQUIRE_LOCK(self);
- if (self->flushed)
- PyErr_SetString(PyExc_ValueError, "Compressor has been flushed");
- else
- result = compress(self, buffer.buf, buffer.len, LZMA_RUN);
- RELEASE_LOCK(self);
- PyBuffer_Release(&buffer);
- return result;
-}
-
-PyDoc_STRVAR(Compressor_flush_doc,
-"flush() -> bytes\n"
-"\n"
-"Finish the compression process. Returns the compressed data left\n"
-"in internal buffers.\n"
-"\n"
-"The compressor object cannot be used after this method is called.\n");
-
-static PyObject *
-Compressor_flush(Compressor *self, PyObject *noargs)
-{
- PyObject *result = NULL;
-
- ACQUIRE_LOCK(self);
- if (self->flushed) {
- PyErr_SetString(PyExc_ValueError, "Repeated call to flush()");
- } else {
- self->flushed = 1;
- result = compress(self, NULL, 0, LZMA_FINISH);
- }
- RELEASE_LOCK(self);
- return result;
-}
-
-static int
-Compressor_init_xz(lzma_stream *lzs, int check, uint32_t preset,
- PyObject *filterspecs)
-{
- lzma_ret lzret;
-
- if (filterspecs == Py_None) {
- lzret = lzma_easy_encoder(lzs, preset, check);
- } else {
- lzma_filter filters[LZMA_FILTERS_MAX + 1];
-
- if (parse_filter_chain_spec(filters, filterspecs) == -1)
- return -1;
- lzret = lzma_stream_encoder(lzs, filters, check);
- free_filter_chain(filters);
- }
- if (catch_lzma_error(lzret))
- return -1;
- else
- return 0;
-}
-
-static int
-Compressor_init_alone(lzma_stream *lzs, uint32_t preset, PyObject *filterspecs)
-{
- lzma_ret lzret;
-
- if (filterspecs == Py_None) {
- lzma_options_lzma options;
-
- if (lzma_lzma_preset(&options, preset)) {
- PyErr_Format(Error, "Invalid compression preset: %#x", preset);
- return -1;
- }
- lzret = lzma_alone_encoder(lzs, &options);
- } else {
- lzma_filter filters[LZMA_FILTERS_MAX + 1];
-
- if (parse_filter_chain_spec(filters, filterspecs) == -1)
- return -1;
- if (filters[0].id == LZMA_FILTER_LZMA1 &&
- filters[1].id == LZMA_VLI_UNKNOWN) {
- lzret = lzma_alone_encoder(lzs, filters[0].options);
- } else {
- PyErr_SetString(PyExc_ValueError,
- "Invalid filter chain for FORMAT_ALONE - "
- "must be a single LZMA1 filter");
- lzret = LZMA_PROG_ERROR;
- }
- free_filter_chain(filters);
- }
- if (PyErr_Occurred() || catch_lzma_error(lzret))
- return -1;
- else
- return 0;
-}
-
-static int
-Compressor_init_raw(lzma_stream *lzs, PyObject *filterspecs)
-{
- lzma_filter filters[LZMA_FILTERS_MAX + 1];
- lzma_ret lzret;
-
- if (filterspecs == Py_None) {
- PyErr_SetString(PyExc_ValueError,
- "Must specify filters for FORMAT_RAW");
- return -1;
- }
- if (parse_filter_chain_spec(filters, filterspecs) == -1)
- return -1;
- lzret = lzma_raw_encoder(lzs, filters);
- free_filter_chain(filters);
- if (catch_lzma_error(lzret))
- return -1;
- else
- return 0;
-}
-
-static int
-Compressor_init(Compressor *self, PyObject *args, PyObject *kwargs)
-{
- static char *arg_names[] = {"format", "check", "preset", "filters", NULL};
- int format = FORMAT_XZ;
- int check = -1;
- uint32_t preset = LZMA_PRESET_DEFAULT;
- PyObject *preset_obj = Py_None;
- PyObject *filterspecs = Py_None;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwargs,
- "|iiOO:LZMACompressor", arg_names,
- &format, &check, &preset_obj,
- &filterspecs))
- return -1;
-
- if (format != FORMAT_XZ && check != -1 && check != LZMA_CHECK_NONE) {
- PyErr_SetString(PyExc_ValueError,
- "Integrity checks are only supported by FORMAT_XZ");
- return -1;
- }
-
- if (preset_obj != Py_None && filterspecs != Py_None) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot specify both preset and filter chain");
- return -1;
- }
-
- if (preset_obj != Py_None)
- if (!uint32_converter(preset_obj, &preset))
- return -1;
-
-#ifdef WITH_THREAD
- self->lock = PyThread_allocate_lock();
- if (self->lock == NULL) {
- PyErr_SetString(PyExc_MemoryError, "Unable to allocate lock");
- return -1;
- }
-#endif
-
- self->flushed = 0;
- switch (format) {
- case FORMAT_XZ:
- if (check == -1)
- check = LZMA_CHECK_CRC64;
- if (Compressor_init_xz(&self->lzs, check, preset, filterspecs) != 0)
- break;
- return 0;
-
- case FORMAT_ALONE:
- if (Compressor_init_alone(&self->lzs, preset, filterspecs) != 0)
- break;
- return 0;
-
- case FORMAT_RAW:
- if (Compressor_init_raw(&self->lzs, filterspecs) != 0)
- break;
- return 0;
-
- default:
- PyErr_Format(PyExc_ValueError,
- "Invalid container format: %d", format);
- break;
- }
-
-#ifdef WITH_THREAD
- PyThread_free_lock(self->lock);
- self->lock = NULL;
-#endif
- return -1;
-}
-
-static void
-Compressor_dealloc(Compressor *self)
-{
- lzma_end(&self->lzs);
-#ifdef WITH_THREAD
- if (self->lock != NULL)
- PyThread_free_lock(self->lock);
-#endif
- Py_TYPE(self)->tp_free((PyObject *)self);
-}
-
-static PyMethodDef Compressor_methods[] = {
- {"compress", (PyCFunction)Compressor_compress, METH_VARARGS,
- Compressor_compress_doc},
- {"flush", (PyCFunction)Compressor_flush, METH_NOARGS,
- Compressor_flush_doc},
- {NULL}
-};
-
-PyDoc_STRVAR(Compressor_doc,
-"LZMACompressor(format=FORMAT_XZ, check=-1, preset=None, filters=None)\n"
-"\n"
-"Create a compressor object for compressing data incrementally.\n"
-"\n"
-"format specifies the container format to use for the output. This can\n"
-"be FORMAT_XZ (default), FORMAT_ALONE, or FORMAT_RAW.\n"
-"\n"
-"check specifies the integrity check to use. For FORMAT_XZ, the default\n"
-"is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not suport integrity\n"
-"checks; for these formats, check must be omitted, or be CHECK_NONE.\n"
-"\n"
-"The settings used by the compressor can be specified either as a\n"
-"preset compression level (with the 'preset' argument), or in detail\n"
-"as a custom filter chain (with the 'filters' argument). For FORMAT_XZ\n"
-"and FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset\n"
-"level. For FORMAT_RAW, the caller must always specify a filter chain;\n"
-"the raw compressor does not support preset compression levels.\n"
-"\n"
-"preset (if provided) should be an integer in the range 0-9, optionally\n"
-"OR-ed with the constant PRESET_EXTREME.\n"
-"\n"
-"filters (if provided) should be a sequence of dicts. Each dict should\n"
-"have an entry for \"id\" indicating the ID of the filter, plus\n"
-"additional entries for options to the filter.\n"
-"\n"
-"For one-shot compression, use the compress() function instead.\n");
-
-static PyTypeObject Compressor_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "_lzma.LZMACompressor", /* tp_name */
- sizeof(Compressor), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)Compressor_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- Compressor_doc, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Compressor_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Compressor_init, /* tp_init */
- 0, /* tp_alloc */
- PyType_GenericNew, /* tp_new */
-};
-
-
-/* LZMADecompressor class. */
-
-static PyObject *
-decompress(Decompressor *d, uint8_t *data, size_t len)
-{
- size_t data_size = 0;
- PyObject *result;
-
- result = PyBytes_FromStringAndSize(NULL, INITIAL_BUFFER_SIZE);
- if (result == NULL)
- return NULL;
- d->lzs.next_in = data;
- d->lzs.avail_in = len;
- d->lzs.next_out = (uint8_t *)PyBytes_AS_STRING(result);
- d->lzs.avail_out = PyBytes_GET_SIZE(result);
- for (;;) {
- lzma_ret lzret;
-
- Py_BEGIN_ALLOW_THREADS
- lzret = lzma_code(&d->lzs, LZMA_RUN);
- data_size = (char *)d->lzs.next_out - PyBytes_AS_STRING(result);
- Py_END_ALLOW_THREADS
- if (catch_lzma_error(lzret))
- goto error;
- if (lzret == LZMA_GET_CHECK || lzret == LZMA_NO_CHECK)
- d->check = lzma_get_check(&d->lzs);
- if (lzret == LZMA_STREAM_END) {
- d->eof = 1;
- if (d->lzs.avail_in > 0) {
- Py_CLEAR(d->unused_data);
- d->unused_data = PyBytes_FromStringAndSize(
- (char *)d->lzs.next_in, d->lzs.avail_in);
- if (d->unused_data == NULL)
- goto error;
- }
- break;
- } else if (d->lzs.avail_in == 0) {
- break;
- } else if (d->lzs.avail_out == 0) {
- if (grow_buffer(&result) == -1)
- goto error;
- d->lzs.next_out = (uint8_t *)PyBytes_AS_STRING(result) + data_size;
- d->lzs.avail_out = PyBytes_GET_SIZE(result) - data_size;
- }
- }
- if (data_size != PyBytes_GET_SIZE(result))
- if (_PyBytes_Resize(&result, data_size) == -1)
- goto error;
- return result;
-
-error:
- Py_XDECREF(result);
- return NULL;
-}
-
-PyDoc_STRVAR(Decompressor_decompress_doc,
-"decompress(data) -> bytes\n"
-"\n"
-"Provide data to the decompressor object. Returns a chunk of\n"
-"decompressed data if possible, or b\"\" otherwise.\n"
-"\n"
-"Attempting to decompress data after the end of the stream is\n"
-"reached raises an EOFError. Any data found after the end of the\n"
-"stream is ignored, and saved in the unused_data attribute.\n");
-
-static PyObject *
-Decompressor_decompress(Decompressor *self, PyObject *args)
-{
- Py_buffer buffer;
- PyObject *result = NULL;
-
- if (!PyArg_ParseTuple(args, "s*:decompress", &buffer))
- return NULL;
-
- ACQUIRE_LOCK(self);
- if (self->eof)
- PyErr_SetString(PyExc_EOFError, "Already at end of stream");
- else
- result = decompress(self, buffer.buf, buffer.len);
- RELEASE_LOCK(self);
- PyBuffer_Release(&buffer);
- return result;
-}
-
-static int
-Decompressor_init_raw(lzma_stream *lzs, PyObject *filterspecs)
-{
- lzma_filter filters[LZMA_FILTERS_MAX + 1];
- lzma_ret lzret;
-
- if (parse_filter_chain_spec(filters, filterspecs) == -1)
- return -1;
- lzret = lzma_raw_decoder(lzs, filters);
- free_filter_chain(filters);
- if (catch_lzma_error(lzret))
- return -1;
- else
- return 0;
-}
-
-static int
-Decompressor_init(Decompressor *self, PyObject *args, PyObject *kwargs)
-{
- static char *arg_names[] = {"format", "memlimit", "filters", NULL};
- const uint32_t decoder_flags = LZMA_TELL_ANY_CHECK | LZMA_TELL_NO_CHECK;
- int format = FORMAT_AUTO;
- uint64_t memlimit = UINT64_MAX;
- PyObject *memlimit_obj = Py_None;
- PyObject *filterspecs = Py_None;
- lzma_ret lzret;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwargs,
- "|iOO:LZMADecompressor", arg_names,
- &format, &memlimit_obj, &filterspecs))
- return -1;
-
- if (memlimit_obj != Py_None) {
- if (format == FORMAT_RAW) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot specify memory limit with FORMAT_RAW");
- return -1;
- }
- memlimit = PyLong_AsUnsignedLongLong(memlimit_obj);
- if (PyErr_Occurred())
- return -1;
- }
-
- if (format == FORMAT_RAW && filterspecs == Py_None) {
- PyErr_SetString(PyExc_ValueError,
- "Must specify filters for FORMAT_RAW");
- return -1;
- } else if (format != FORMAT_RAW && filterspecs != Py_None) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot specify filters except with FORMAT_RAW");
- return -1;
- }
-
-#ifdef WITH_THREAD
- self->lock = PyThread_allocate_lock();
- if (self->lock == NULL) {
- PyErr_SetString(PyExc_MemoryError, "Unable to allocate lock");
- return -1;
- }
-#endif
-
- self->check = LZMA_CHECK_UNKNOWN;
- self->unused_data = PyBytes_FromStringAndSize(NULL, 0);
- if (self->unused_data == NULL)
- goto error;
-
- switch (format) {
- case FORMAT_AUTO:
- lzret = lzma_auto_decoder(&self->lzs, memlimit, decoder_flags);
- if (catch_lzma_error(lzret))
- break;
- return 0;
-
- case FORMAT_XZ:
- lzret = lzma_stream_decoder(&self->lzs, memlimit, decoder_flags);
- if (catch_lzma_error(lzret))
- break;
- return 0;
-
- case FORMAT_ALONE:
- self->check = LZMA_CHECK_NONE;
- lzret = lzma_alone_decoder(&self->lzs, memlimit);
- if (catch_lzma_error(lzret))
- break;
- return 0;
-
- case FORMAT_RAW:
- self->check = LZMA_CHECK_NONE;
- if (Decompressor_init_raw(&self->lzs, filterspecs) == -1)
- break;
- return 0;
-
- default:
- PyErr_Format(PyExc_ValueError,
- "Invalid container format: %d", format);
- break;
- }
-
-error:
- Py_CLEAR(self->unused_data);
-#ifdef WITH_THREAD
- PyThread_free_lock(self->lock);
- self->lock = NULL;
-#endif
- return -1;
-}
-
-static void
-Decompressor_dealloc(Decompressor *self)
-{
- lzma_end(&self->lzs);
- Py_CLEAR(self->unused_data);
-#ifdef WITH_THREAD
- if (self->lock != NULL)
- PyThread_free_lock(self->lock);
-#endif
- Py_TYPE(self)->tp_free((PyObject *)self);
-}
-
-static PyMethodDef Decompressor_methods[] = {
- {"decompress", (PyCFunction)Decompressor_decompress, METH_VARARGS,
- Decompressor_decompress_doc},
- {NULL}
-};
-
-PyDoc_STRVAR(Decompressor_check_doc,
-"ID of the integrity check used by the input stream.");
-
-PyDoc_STRVAR(Decompressor_eof_doc,
-"True if the end-of-stream marker has been reached.");
-
-PyDoc_STRVAR(Decompressor_unused_data_doc,
-"Data found after the end of the compressed stream.");
-
-static PyMemberDef Decompressor_members[] = {
- {"check", T_INT, offsetof(Decompressor, check), READONLY,
- Decompressor_check_doc},
- {"eof", T_BOOL, offsetof(Decompressor, eof), READONLY,
- Decompressor_eof_doc},
- {"unused_data", T_OBJECT_EX, offsetof(Decompressor, unused_data), READONLY,
- Decompressor_unused_data_doc},
- {NULL}
-};
-
-PyDoc_STRVAR(Decompressor_doc,
-"LZMADecompressor(format=FORMAT_AUTO, memlimit=None, filters=None)\n"
-"\n"
-"Create a decompressor object for decompressing data incrementally.\n"
-"\n"
-"format specifies the container format of the input stream. If this is\n"
-"FORMAT_AUTO (the default), the decompressor will automatically detect\n"
-"whether the input is FORMAT_XZ or FORMAT_ALONE. Streams created with\n"
-"FORMAT_RAW cannot be autodetected.\n"
-"\n"
-"memlimit can be specified to limit the amount of memory used by the\n"
-"decompressor. This will cause decompression to fail if the input\n"
-"cannot be decompressed within the given limit.\n"
-"\n"
-"filters specifies a custom filter chain. This argument is required for\n"
-"FORMAT_RAW, and not accepted with any other format. When provided,\n"
-"this should be a sequence of dicts, each indicating the ID and options\n"
-"for a single filter.\n"
-"\n"
-"For one-shot decompression, use the decompress() function instead.\n");
-
-static PyTypeObject Decompressor_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "_lzma.LZMADecompressor", /* tp_name */
- sizeof(Decompressor), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)Decompressor_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- Decompressor_doc, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Decompressor_methods, /* tp_methods */
- Decompressor_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Decompressor_init, /* tp_init */
- 0, /* tp_alloc */
- PyType_GenericNew, /* tp_new */
-};
-
-
-/* Module-level functions. */
-
-PyDoc_STRVAR(check_is_supported_doc,
-"check_is_supported(check_id) -> bool\n"
-"\n"
-"Test whether the given integrity check is supported.\n"
-"\n"
-"Always returns True for CHECK_NONE and CHECK_CRC32.\n");
-
-static PyObject *
-check_is_supported(PyObject *self, PyObject *args)
-{
- int check_id;
-
- if (!PyArg_ParseTuple(args, "i:check_is_supported", &check_id))
- return NULL;
-
- return PyBool_FromLong(lzma_check_is_supported(check_id));
-}
-
-
-/* Module initialization. */
-
-static PyMethodDef module_methods[] = {
- {"check_is_supported", (PyCFunction)check_is_supported,
- METH_VARARGS, check_is_supported_doc},
- {NULL}
-};
-
-/* Some of our constants are more than 32 bits wide, so PyModule_AddIntConstant
- would not work correctly on platforms with 32-bit longs. */
-static int
-module_add_int_constant(PyObject *m, const char *name, PY_LONG_LONG value)
-{
- PyObject *o = PyLong_FromLongLong(value);
- if (o == NULL)
- return -1;
- if (PyModule_AddObject(m, name, o) == 0)
- return 0;
- Py_DECREF(o);
- return -1;
-}
-
-#define ADD_INT_PREFIX_MACRO(m, macro) \
- module_add_int_constant(m, #macro, LZMA_ ## macro)
-
-void init_lzma(void)
-{
- PyObject *m;
-
- empty_tuple = PyTuple_New(0);
- if (empty_tuple == NULL)
- return;
-
- m = Py_InitModule("_lzma", module_methods);
- if (m == NULL)
- return;
-
- if (PyModule_AddIntMacro(m, FORMAT_AUTO) == -1 ||
- PyModule_AddIntMacro(m, FORMAT_XZ) == -1 ||
- PyModule_AddIntMacro(m, FORMAT_ALONE) == -1 ||
- PyModule_AddIntMacro(m, FORMAT_RAW) == -1 ||
- ADD_INT_PREFIX_MACRO(m, CHECK_NONE) == -1 ||
- ADD_INT_PREFIX_MACRO(m, CHECK_CRC32) == -1 ||
- ADD_INT_PREFIX_MACRO(m, CHECK_CRC64) == -1 ||
- ADD_INT_PREFIX_MACRO(m, CHECK_SHA256) == -1 ||
- ADD_INT_PREFIX_MACRO(m, CHECK_ID_MAX) == -1 ||
- ADD_INT_PREFIX_MACRO(m, CHECK_UNKNOWN) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_LZMA1) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_LZMA2) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_DELTA) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_X86) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_IA64) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_ARM) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_ARMTHUMB) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_SPARC) == -1 ||
- ADD_INT_PREFIX_MACRO(m, FILTER_POWERPC) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MF_HC3) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MF_HC4) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MF_BT2) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MF_BT3) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MF_BT4) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MODE_FAST) == -1 ||
- ADD_INT_PREFIX_MACRO(m, MODE_NORMAL) == -1 ||
- ADD_INT_PREFIX_MACRO(m, PRESET_DEFAULT) == -1 ||
- ADD_INT_PREFIX_MACRO(m, PRESET_EXTREME) == -1)
- return;
-
- Error = PyErr_NewExceptionWithDoc(
- "_lzma.LZMAError", "Call to liblzma failed.", NULL, NULL);
- if (Error == NULL)
- return;
- Py_INCREF(Error);
- if (PyModule_AddObject(m, "LZMAError", Error) == -1)
- return;
-
- if (PyType_Ready(&Compressor_type) == -1)
- return;
- Py_INCREF(&Compressor_type);
- if (PyModule_AddObject(m, "LZMACompressor",
- (PyObject *)&Compressor_type) == -1)
- return;
-
- if (PyType_Ready(&Decompressor_type) == -1)
- return;
- Py_INCREF(&Decompressor_type);
- if (PyModule_AddObject(m, "LZMADecompressor",
- (PyObject *)&Decompressor_type) == -1)
- return;
-
- return m;
-}
{ NULL, NULL, 0, NULL }
};
-void init_pakfire(void) {
+static struct PyModuleDef moduledef = {
+ .m_base = PyModuleDef_HEAD_INIT,
+ .m_name = "_pakfire",
+ .m_size = -1,
+ .m_methods = pakfireModuleMethods,
+};
+
+PyMODINIT_FUNC PyInit__pakfire(void) {
/* Initialize locale */
setlocale(LC_ALL, "");
bindtextdomain(PACKAGE_TARNAME, "/usr/share/locale");
textdomain(PACKAGE_TARNAME);
- /* Load the python module */
- PyObject *m, *d;
-
- m = Py_InitModule("_pakfire", pakfireModuleMethods);
+ // Create the module
+ PyObject* module = PyModule_Create(&moduledef);
+ if (!module)
+ return NULL;
// Pool
PoolType.tp_methods = Pool_methods;
if (PyType_Ready(&PoolType) < 0)
- return;
+ return NULL;
Py_INCREF(&PoolType);
- PyModule_AddObject(m, "Pool", (PyObject *)&PoolType);
+ PyModule_AddObject(module, "Pool", (PyObject *)&PoolType);
// Problem
ProblemType.tp_methods = Problem_methods;
if (PyType_Ready(&ProblemType) < 0)
- return;
+ return NULL;
Py_INCREF(&ProblemType);
- PyModule_AddObject(m, "Problem", (PyObject *)&ProblemType);
+ PyModule_AddObject(module, "Problem", (PyObject *)&ProblemType);
// Repo
RepoType.tp_methods = Repo_methods;
if (PyType_Ready(&RepoType) < 0)
- return;
+ return NULL;
Py_INCREF(&RepoType);
- PyModule_AddObject(m, "Repo", (PyObject *)&RepoType);
+ PyModule_AddObject(module, "Repo", (PyObject *)&RepoType);
// Solvable
SolvableType.tp_methods = Solvable_methods;
if (PyType_Ready(&SolvableType) < 0)
- return;
+ return NULL;
Py_INCREF(&SolvableType);
- PyModule_AddObject(m, "Solvable", (PyObject *)&SolvableType);
+ PyModule_AddObject(module, "Solvable", (PyObject *)&SolvableType);
// Relation
RelationType.tp_methods = Relation_methods;
if (PyType_Ready(&RelationType) < 0)
- return;
+ return NULL;
Py_INCREF(&RelationType);
- PyModule_AddObject(m, "Relation", (PyObject *)&RelationType);
+ PyModule_AddObject(module, "Relation", (PyObject *)&RelationType);
// Request
RequestType.tp_methods = Request_methods;
if (PyType_Ready(&RequestType) < 0)
- return;
+ return NULL;
Py_INCREF(&RequestType);
- PyModule_AddObject(m, "Request", (PyObject *)&RequestType);
+ PyModule_AddObject(module, "Request", (PyObject *)&RequestType);
// Solution
SolutionType.tp_methods = Solution_methods;
if (PyType_Ready(&SolutionType) < 0)
- return;
+ return NULL;
Py_INCREF(&SolutionType);
- PyModule_AddObject(m, "Solution", (PyObject *)&SolutionType);
+ PyModule_AddObject(module, "Solution", (PyObject *)&SolutionType);
// Solver
SolverType.tp_methods = Solver_methods;
if (PyType_Ready(&SolverType) < 0)
- return;
+ return NULL;
Py_INCREF(&SolverType);
- PyModule_AddObject(m, "Solver", (PyObject *)&SolverType);
+ PyModule_AddObject(module, "Solver", (PyObject *)&SolverType);
// Step
StepType.tp_methods = Step_methods;
if (PyType_Ready(&StepType) < 0)
- return;
+ return NULL;
Py_INCREF(&StepType);
- PyModule_AddObject(m, "Step", (PyObject *)&StepType);
+ PyModule_AddObject(module, "Step", (PyObject *)&StepType);
// Transaction
TransactionType.tp_methods = Transaction_methods;
if (PyType_Ready(&TransactionType) < 0)
- return;
+ return NULL;
Py_INCREF(&TransactionType);
- PyModule_AddObject(m, "Transaction", (PyObject *)&TransactionType);
+ PyModule_AddObject(module, "Transaction", (PyObject *)&TransactionType);
// Add constants
- d = PyModule_GetDict(m);
+ PyObject* d = PyModule_GetDict(module);
// Personalities
PyDict_SetItemString(d, "PERSONALITY_LINUX", Py_BuildValue("i", PER_LINUX));
PyDict_SetItemString(d, "SOLVER_FLAG_NO_UPDATEPROVIDE", Py_BuildValue("i", SOLVER_FLAG_NO_UPDATEPROVIDE));
PyDict_SetItemString(d, "SOLVER_FLAG_SPLITPROVIDES", Py_BuildValue("i", SOLVER_FLAG_SPLITPROVIDES));
PyDict_SetItemString(d, "SOLVER_FLAG_IGNORE_RECOMMENDED", Py_BuildValue("i", SOLVER_FLAG_IGNORE_RECOMMENDED));
+
+ return module;
}
#include "solvable.h"
PyTypeObject PoolType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Pool",
tp_basicsize: sizeof(PoolObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
PyObject *Pool_dealloc(PoolObject *self) {
pool_free(self->_pool);
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include "solver.h"
PyTypeObject ProblemType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Problem",
tp_basicsize: sizeof(ProblemObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Problem_dealloc(ProblemObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#define REL_NONE 0
PyTypeObject RelationType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Relation",
tp_basicsize: sizeof(RelationObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Relation_dealloc(RelationObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include "solvable.h"
PyTypeObject RepoType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Repo",
tp_basicsize: sizeof(RepoObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Repo_dealloc(RepoObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include <solv/solver.h>
PyTypeObject RequestType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Request",
tp_basicsize: sizeof(RequestObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Request_dealloc(RequestObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include "solution.h"
PyTypeObject SolutionType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Solution",
tp_basicsize: sizeof(SolutionObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Solution_dealloc(SolutionObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include "solvable.h"
PyTypeObject SolvableType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Solvable",
tp_basicsize: sizeof(SolvableObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Solvable_dealloc(SolvableObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include <solv/solverdebug.h>
PyTypeObject SolverType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Solver",
tp_basicsize: sizeof(SolverObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
PyObject *Solver_dealloc(SolverObject *self) {
solver_free(self->_solver);
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include "transaction.h"
PyTypeObject StepType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Step",
tp_basicsize: sizeof(StepObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
}
PyObject *Step_dealloc(StepObject *self) {
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
#include "transaction.h"
PyTypeObject TransactionType = {
- PyObject_HEAD_INIT(NULL)
+ PyVarObject_HEAD_INIT(NULL, 0)
tp_name: "_pakfire.Transaction",
tp_basicsize: sizeof(TransactionObject),
tp_flags: Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
PyObject *Transaction_dealloc(TransactionObject *self) {
/* XXX need to free self->_transaction */
- self->ob_type->tp_free((PyObject *)self);
+ Py_TYPE(self)->tp_free((PyObject *)self);
Py_RETURN_NONE;
}
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-from base import Pakfire, PakfireBuilder, PakfireServer
-
-from constants import PAKFIRE_VERSION
+from . import base
+from .constants import PAKFIRE_VERSION
__version__ = PAKFIRE_VERSION
+
+Pakfire = base.Pakfire
+PakfireBuilder = base.PakfireBuilder
+PakfireServer = base.PakfireServer
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import os
import sys
-import packages
-import shell
-import util
+from . import packages
+from . import shell
+from . import util
import logging
log = logging.getLogger("pakfire")
-from constants import *
-from i18n import _
+from .constants import *
+from .i18n import _
class Action(object):
def __init__(self, pakfire, pkg_solv, pkg_bin=None):
# Check if there are any signatures at all.
if not self.pkg.signatures:
- raise SignatureError, _("%s has got no signatures") % self.pkg.friendly_name
+ raise SignatureError(_("%s has got no signatures") % self.pkg.friendly_name)
# Run the verification process and save the result.
sigs = self.pkg.verify()
if not sigs:
- raise SignatureError, _("%s has got no valid signatures") % self.pkg.friendly_name
+ raise SignatureError(_("%s has got no valid signatures") % self.pkg.friendly_name)
@property
def pkg(self):
def _set_pkg_bin(self, pkg):
if pkg and not self.pkg_solv.uuid == pkg.uuid:
- raise RuntimeError, "Not the same package: %s != %s" % (self.pkg_solv, pkg)
+ raise RuntimeError("Not the same package: %s != %s" % (self.pkg_solv, pkg))
self._pkg_bin = pkg
self.run_python()
else:
- raise ActionError, _("Could not handle scriptlet of unknown type. Skipping.")
+ raise ActionError(_("Could not handle scriptlet of unknown type. Skipping."))
def run_exec(self):
log.debug(_("Executing scriptlet..."))
if self.interpreter:
interpreter = "%s/%s" % (self.pakfire.path, self.interpreter)
if not os.path.exists(interpreter):
- raise ActionError, _("Cannot run scriptlet because no interpreter is available: %s" \
- % self.interpreter)
+ raise ActionError(_("Cannot run scriptlet because no interpreter is available: %s" \
+ % self.interpreter))
if not os.access(interpreter, os.X_OK):
- raise ActionError, _("Cannot run scriptlet because the interpreter is not executable: %s" \
- % self.interpreter)
+ raise ActionError(_("Cannot run scriptlet because the interpreter is not executable: %s" \
+ % self.interpreter))
# Create a name for the temporary script file.
script_file_chroot = os.path.join("/", LOCAL_TMP_PATH,
try:
self.execute(command)
- except ShellEnvironmentError, e:
- raise ActionError, _("The scriptlet returned an error:\n%s" % e)
+ except ShellEnvironmentError as e:
+ raise ActionError(_("The scriptlet returned an error:\n%s" % e))
except commandTimeoutExpired:
- raise ActionError, _("The scriptlet ran more than %s seconds and was killed." \
- % SCRIPTLET_TIMEOUT)
+ raise ActionError(_("The scriptlet ran more than %s seconds and was killed." \
+ % SCRIPTLET_TIMEOUT))
- except Exception, e:
- raise ActionError, _("The scriptlet returned with an unhandled error:\n%s" % e)
+ except Exception as e:
+ raise ActionError(_("The scriptlet returned with an unhandled error:\n%s" % e))
finally:
# Remove the script file.
obj = compile(self.scriptlet, "<string>", "exec")
eval(obj, _globals, {})
- except Exception, e:
- print _("Exception occured: %s") % e
+ except Exception as e:
+ print(_("Exception occured: %s") % e)
os._exit(1)
# End the child process without cleaning up.
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import random
import string
-import actions
-import builder
-import config
-import distro
-import filelist
-import keyring
-import logger
-import packages
-import repository
-import satsolver
-import transaction
-import util
+from . import actions
+from . import builder
+from . import config
+from . import distro
+from . import filelist
+from . import keyring
+from . import logger
+from . import packages
+from . import repository
+from . import satsolver
+from . import transaction
+from . import util
import logging
log = logging.getLogger("pakfire")
-from config import Config
-from constants import *
-from i18n import _
+from .config import Config
+from .constants import *
+from .i18n import _
class Pakfire(object):
mode = None
self.config = self._load_config(configs)
# Update configuration with additional arguments.
- for section, settings in kwargs.items():
+ for section, settings in list(kwargs.items()):
self.config.update(section, settings)
# Dump the configuration.
def check_root_user(self):
if not os.getuid() == 0 or not os.getgid() == 0:
- raise Exception, "You must run pakfire as the root user."
+ raise Exception("You must run pakfire as the root user.")
def check_host_arch(self, arch):
"""
return True
if not system.host_supports_arch(arch):
- raise BuildError, "Cannot build for the target architecture: %s" % arch
+ raise BuildError("Cannot build for the target architecture: %s" % arch)
- raise BuildError, arch
+ raise BuildError(arch)
def check_is_ipfire(self):
ret = os.path.exists("/etc/ipfire-release")
if not ret:
- raise NotAnIPFireSystemError, "You can run pakfire only on an IPFire system"
+ raise NotAnIPFireSystemError("You can run pakfire only on an IPFire system")
@property
def builder(self):
# Check, if a package with the name is already in the resultset
# and always replace older ones by more recent ones.
- if pkgs.has_key(pkg.name):
+ if pkg.name in pkgs:
if pkgs[pkg.name] < pkg:
pkgs[pkg.name] = pkg
else:
b.build(stages=stages)
except Error:
- raise BuildError, _("Build command has failed.")
+ raise BuildError(_("Build command has failed."))
else:
# If the build was successful, cleanup all temporary files.
if not c.has_distro_conf():
log.error(_("You have not set the distribution for which you want to build."))
log.error(_("Please do so in builder.conf or on the CLI."))
- raise ConfigError, _("Distribution configuration is missing.")
+ raise ConfigError(_("Distribution configuration is missing."))
return c
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import time
import uuid
-import base
-import cgroup
-import logger
-import packages
-import packages.file
-import packages.packager
-import repository
-import shell
-import util
-import _pakfire
+from . import _pakfire
+from . import cgroup
+from . import logger
+from . import packages
+from . import repository
+from . import shell
+from . import util
import logging
log = logging.getLogger("pakfire")
-from config import ConfigBuilder
-from system import system
-from constants import *
-from i18n import _
-from errors import BuildError, BuildRootLocked, Error
+from .config import ConfigBuilder
+from .system import system
+from .constants import *
+from .i18n import _
+from .errors import BuildError, BuildRootLocked, Error
BUILD_LOG_HEADER = """
def __init__(self, pakfire, filename=None, distro_name=None, build_id=None, logfile=None, release_build=True, **kwargs):
self.pakfire = pakfire
- # Check if the given pakfire instance is of the correct type.
- assert isinstance(self.pakfire, base.PakfireBuilder)
-
# Check if this host can build the requested architecture.
if not system.host_supports_arch(self.arch):
- raise BuildError, _("Cannot build for %s on this host.") % self.arch
+ raise BuildError(_("Cannot build for %s on this host.") % self.arch)
# Save the build id and generate one if no build id was provided.
if not build_id:
# Log the package information.
self.log.info(_("Package information:"))
- for line in self.pkg.dump(long=True).splitlines():
+ for line in self.pkg.dump(long=True).splitlines():
self.log.info(" %s" % line)
self.log.info("")
# we try to fall back to just set CLONE_NEWNS.
try:
_pakfire.unshare(_pakfire.SCHED_CLONE_NEWNS|_pakfire.SCHED_CLONE_NEWIPC|_pakfire.SCHED_CLONE_NEWUTS)
- except RuntimeError, e:
+ except RuntimeError as e:
_pakfire.unshare(_pakfire.SCHED_CLONE_NEWNS)
# Mount the directories.
try:
self._mountall()
- except OSError, e:
+ except OSError as e:
if e.errno == 30: # Read-only FS
- raise BuildError, "Buildroot is read-only: %s" % self.pakfire.path
+ raise BuildError("Buildroot is read-only: %s" % self.pakfire.path)
# Raise all other errors.
raise
try:
self._lock = open(filename, "a+")
- except IOError, e:
+ except IOError as e:
return 0
try:
fcntl.lockf(self._lock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError, e:
- raise BuildRootLocked, "Buildroot is locked"
+ except IOError as e:
+ raise BuildRootLocked("Buildroot is locked")
return 1
"logger" : self.log,
})
- if not kwargs.has_key("allow_downgrade"):
+ if "allow_downgrade" not in kwargs:
kwargs["allow_downgrade"] = True
# Install everything.
# Environment variables
env = self.environ
- if kwargs.has_key("env"):
+ if "env" in kwargs:
env.update(kwargs.pop("env"))
self.log.debug("Environment:")
def build(self, install_test=True, prepare=False):
if not self.pkg:
- raise BuildError, _("You cannot run a build when no package was given.")
+ raise BuildError(_("You cannot run a build when no package was given."))
# Search for the package file in build_dir and raise BuildError if it is not present.
if not os.path.exists(self.pkg_makefile):
- raise BuildError, _("Could not find makefile in build root: %s") % self.pkg_makefile
+ raise BuildError(_("Could not find makefile in build root: %s") % self.pkg_makefile)
# Write pakfire configuration into the chroot.
self.write_config()
return
# End here in case of an error.
- raise BuildError, _("The build command failed. See logfile for details.")
+ raise BuildError(_("The build command failed. See logfile for details."))
def install_test(self):
self.log.info(_("Running installation test..."))
if self.pakfire.distro.personality:
command = "%s %s" % (self.pakfire.distro.personality, command)
- for key, val in self.environ.items():
+ for key, val in list(self.environ.items()):
command = "%s=\"%s\" " % (key, val) + command
# Empty the environment
files = self.find_result_packages()
# Create a progressbar.
- print _("Signing packages...")
+ print(_("Signing packages..."))
p = util.make_progress(keyfp, len(files))
i = 0
# Close progressbar.
if p:
p.finish()
- print "" # Print an empty line.
+ print("") # Print an empty line.
def dump(self):
pkgs = []
self.log.info(_("Dumping package information:"))
for pkg in pkgs:
- dump = pkg.dump(long=True)
+ dump = pkg.dump(long=True)
for line in dump.splitlines():
self.log.info(" %s" % line)
try:
self.execute("%s/remove-static-libs %s %s" % \
(SCRIPT_DIR, self.buildroot, " ".join(keep_libs)))
- except ShellEnvironmentError, e:
+ except ShellEnvironmentError as e:
log.warning(_("Could not remove static libraries: %s") % e)
def post_compress_man_pages(self):
try:
self.execute("%s/compress-man-pages %s" % (SCRIPT_DIR, self.buildroot))
- except ShellEnvironmentError, e:
+ except ShellEnvironmentError as e:
log.warning(_("Compressing man pages did not complete successfully."))
def post_extract_debuginfo(self):
try:
self.execute("%s/extract-debuginfo %s %s" % (SCRIPT_DIR, " ".join(args), self.pkg.buildroot))
- except ShellEnvironmentError, e:
+ except ShellEnvironmentError as e:
log.error(_("Extracting debuginfo did not complete with success. Aborting build."))
raise
-#!/usr/bin/python
+#!/usr/bin/python3
import os
import shutil
# Remove the file tree.
try:
os.rmdir(self.path)
- except OSError, e:
+ except OSError as e:
# Ignore "Device or resource busy".
if e.errno == 16:
return
try:
os.kill(proc, sig)
- except OSError, e:
+ except OSError as e:
# Skip "No such process" error
if e.errno == 3:
pass
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import sys
import tempfile
-import base
-import client
-import config
-import daemon
-import logger
-import packages
-import repository
-import server
-import transaction
-import util
-
-from system import system
-from constants import *
-from i18n import _
+from . import base
+from . import client
+from . import config
+from . import daemon
+from . import logger
+from . import packages
+from . import repository
+from . import server
+from . import transaction
+from . import util
+
+from .system import system
+from .constants import *
+from .i18n import _
# Initialize a very simple logging that is removed when a Pakfire instance
# is started.
try:
func = self.action2func[action]
except KeyError:
- raise Exception, "Unhandled action: %s" % action
+ raise Exception("Unhandled action: %s" % action)
return func()
p = self.create_pakfire()
for pkg in p.info(self.args.package):
- print pkg.dump(long=long)
+ print(pkg.dump(long=True))
def handle_search(self):
p = self.create_pakfire()
for pkg in p.search(self.args.pattern):
- print pkg.dump(short=True)
+ print(pkg.dump(short=True))
def handle_update(self, **args):
p = self.create_pakfire()
p = self.create_pakfire()
for pkg in p.provides(self.args.pattern):
- print pkg.dump(long=long)
+ print(pkg.dump(long=True))
def handle_grouplist(self):
p = self.create_pakfire()
for pkg in p.grouplist(self.args.group[0]):
- print " * %s" % pkg
+ print(" * %s" % pkg)
def handle_groupinstall(self):
p = self.create_pakfire()
FORMAT = " %-20s %8s %12s %12s "
title = FORMAT % (_("Repository"), _("Enabled"), _("Priority"), _("Packages"))
- print title
- print "=" * len(title) # spacing line
+ print(title)
+ print("=" * len(title)) # spacing line
for repo in repos:
- print FORMAT % (repo.name, repo.enabled, repo.priority, len(repo))
+ print(FORMAT % (repo.name, repo.enabled, repo.priority, len(repo)))
def handle_clean_all(self):
- print _("Cleaning up everything...")
+ print(_("Cleaning up everything..."))
p = self.create_pakfire()
p.clean_all()
source_packages = any([p.type == "source" for p in pkgs])
if binary_packages and source_packages:
- raise Error, _("Cannot extract mixed package types")
+ raise Error(_("Cannot extract mixed package types"))
if binary_packages and not target_prefix:
- raise Error, _("You must provide an install directory with --target=...")
+ raise Error(_("You must provide an install directory with --target=..."))
elif source_packages and not target_prefix:
target_prefix = "/usr/src/packages/"
if target_prefix == "/":
- raise Error, _("Cannot extract to /.")
+ raise Error(_("Cannot extract to /."))
for pkg in pkgs:
if pkg.type == "binary":
# Check if we are already running in a pakfire container. In that
# case, we cannot start another pakfire-builder.
if os.environ.get("container", None) == "pakfire-builder":
- raise PakfireContainerError, _("You cannot run pakfire-builder in a pakfire chroot.")
+ raise PakfireContainerError(_("You cannot run pakfire-builder in a pakfire chroot."))
self.parser = argparse.ArgumentParser(
description = _("Pakfire builder command line interface."),
help=_("Path were the output files should be copied to."))
def handle_info(self):
- Cli.handle_info(self, long=True)
+ Cli.handle_info(self, long=True)
def handle_build(self):
# Get the package descriptor from the command line options
pkg = os.path.abspath(pkg)
else:
- raise FileNotFoundError, pkg
+ raise FileNotFoundError(pkg)
# Build argument list.
kwargs = {
pkg = os.path.abspath(pkg)
else:
- raise FileNotFoundError, pkg
+ raise FileNotFoundError(pkg)
if self.args.mode == "release":
release_build = True
pkgs.append(pkg)
else:
- raise FileNotFoundError, pkg
+ raise FileNotFoundError(pkg)
# Put packages to where the user said or our
# current working directory.
p.dist(pkg, resultdir=resultdir)
def handle_provides(self):
- Cli.handle_provides(self, long=True)
+ Cli.handle_provides(self, long=True)
class CliServer(Cli):
for file in os.listdir(tmpdir):
file = os.path.join(tmpdir, file)
- print file
+ print(file)
finally:
if os.path.exists(tmpdir):
def handle_info(self):
info = self.server.info()
- print "\n".join(info)
+ print("\n".join(info))
class CliBuilderIntern(Cli):
if os.path.exists(pkg):
pkg = os.path.abspath(pkg)
else:
- raise FileNotFoundError, pkg
+ raise FileNotFoundError(pkg)
# Create pakfire instance.
c = config.ConfigBuilder()
pass
else:
- raise Exception, "Unknown filetype: %s" % package
+ raise Exception("Unknown filetype: %s" % package)
# Format arches.
if self.args.arch:
ret.append("")
for line in ret:
- print line
+ print(line)
def handle_connection_check(self):
ret = []
ret.append(_("You could not be authenticated to the build service."))
for line in ret:
- print line
+ print(line)
def _print_jobs(self, jobs, heading=None):
if heading:
- print "%s:" % heading
- print
+ print("%s:" % heading)
+ print()
for job in jobs:
line = " [%(type)8s] %(name)-30s: %(state)s"
- print line % job
+ print(line % job)
- print # Empty line at the end.
+ print() # Empty line at the end.
def handle_jobs_active(self):
jobs = self.client.get_active_jobs()
if not jobs:
- print _("No ongoing jobs found.")
+ print(_("No ongoing jobs found."))
return
self._print_jobs(jobs, _("Active build jobs"))
jobs = self.client.get_latest_jobs()
if not jobs:
- print _("No jobs found.")
+ print(_("No jobs found."))
return
self._print_jobs(jobs, _("Recently processed build jobs"))
build = self.client.get_build(build_id)
if not build:
- print _("A build with ID %s could not be found.") % build_id
+ print(_("A build with ID %s could not be found.") % build_id)
return
- print _("Build: %(name)s") % build
+ print(_("Build: %(name)s") % build)
fmt = "%-14s: %s"
lines = [
lines.append(" * [%(uuid)s] %(name)-30s: %(state)s" % job)
for line in lines:
- print " ", line
- print
+ print(" ", line)
+ print()
def handle_jobs_show(self):
(job_id,) = self.args.job_id
job = self.client.get_job(job_id)
if not job:
- print _("A job with ID %s could not be found.") % job_id
+ print(_("A job with ID %s could not be found.") % job_id)
return
builder = None
if job["builder_id"]:
builder = self.client.get_builder(job["builder_id"])
- print _("Job: %(name)s") % job
+ print(_("Job: %(name)s") % job)
fmt = "%-14s: %s"
lines = [
lines += [" %s" % line for line in pkg_lines]
for line in lines:
- print " ", line
- print # New line.
+ print(" ", line)
+ print() # New line.
def handle_test(self):
error_code = self.args.error_code[0]
error_code = 0
if error_code < 100 or error_code > 999:
- raise Error, _("Invalid error code given.")
+ raise Error(_("Invalid error code given."))
res = self.client.test_code(error_code)
- print _("Reponse from the server: %s") % res
+ print(_("Reponse from the server: %s") % res)
def watch_build(self, build_id):
- print self.client.build_get(build_id)
+ print(self.client.build_get(build_id))
# XXX TODO
- print build_id
+ print(build_id)
class CliDaemon(Cli):
realname = self.args.realname[0]
email = self.args.email[0]
- print _("Generating the key may take a moment...")
- print
+ print(_("Generating the key may take a moment..."))
+ print()
# Generate the key.
p = self.create_pakfire()
def handle_list(self):
p = self.create_pakfire()
for line in p.keyring.list_keys():
- print line
+ print(line)
def handle_sign(self):
# Get the files from the command line options
files.append(file)
else:
- raise FileNotFoundError, file
+ raise FileNotFoundError(file)
key = self.args.key[0]
# Open the package.
pkg = packages.open(p, None, file)
- print _("Signing %s...") % pkg.friendly_name
+ print(_("Signing %s...") % pkg.friendly_name)
pkg.sign(key)
def handle_verify(self):
# Open the package.
pkg = packages.open(p, None, file)
- print _("Verifying %s...") % pkg.friendly_name
+ print(_("Verifying %s...") % pkg.friendly_name)
sigs = pkg.verify()
for sig in sigs:
if key:
subkey = key.subkeys[0]
- print " %s %s" % (subkey.fpr[-16:], key.uids[0].uid)
+ print(" %s %s" % (subkey.fpr[-16:], key.uids[0].uid))
if sig.validity:
- print " %s" % _("This signature is valid.")
+ print(" %s" % _("This signature is valid."))
else:
- print " %s <%s>" % (sig.fpr, _("Unknown key"))
- print " %s" % _("Could not check if this signature is valid.")
+ print(" %s <%s>" % (sig.fpr, _("Unknown key")))
+ print(" %s" % _("Could not check if this signature is valid."))
created = datetime.datetime.fromtimestamp(sig.timestamp)
- print " %s" % _("Created: %s") % created
+ print(" %s" % _("Created: %s") % created)
if sig.exp_timestamp:
expires = datetime.datetime.fromtimestamp(sig.exp_timestamp)
- print " %s" % _("Expires: %s") % expires
+ print(" %s" % _("Expires: %s") % expires)
- print # Empty line
+ print() # Empty line
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-import transport
+from . import transport
from pakfire.constants import *
from pakfire.i18n import _
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-import pakfire.lzma as lzma
+import lzma
-from constants import *
-from i18n import _
+from .constants import *
+from .i18n import _
ALGO_DEFAULT = "xz"
# Iterate over all algoriths and their magic values
# and check for a match.
- for algo, magic in MAGICS.items():
+ for algo, magic in list(MAGICS.items()):
fileobj.seek(0)
start_sequence = fileobj.read(len(magic))
def decompressobj(name=None, fileobj=None, algo=ALGO_DEFAULT):
f_cls = FILES.get(algo, None)
if not f_cls:
- raise CompressionError, _("Given algorithm '%s' is not supported.")
+ raise CompressionError(_("Given algorithm '%s' is not supported."))
f = f_cls(name, fileobj=fileobj, mode="r")
def compressobj(name=None, fileobj=None, algo=ALGO_DEFAULT):
f_cls = FILES.get(algo, None)
if not f_cls:
- raise CompressionError, _("Given algorithm '%s' is not supported.")
+ raise CompressionError(_("Given algorithm '%s' is not supported."))
f = f_cls(name, fileobj=fileobj, mode="w")
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
+import configparser
import io
import os
import socket
-from ConfigParser import ConfigParser
-
import logging
log = logging.getLogger("pakfire")
-import logger
-from system import system
+from . import logger
+from .system import system
-from constants import *
-from i18n import _
+from .constants import *
+from .i18n import _
class _Config(object):
files = []
def get_repos(self):
repos = []
- for name, settings in self._config.items():
+ for name, settings in list(self._config.items()):
if not name.startswith("repo:"):
continue
# Parse the file.
with open(file) as f:
- self.parse(f.read())
+ self.parse(f)
# Save the filename to the list of read files.
self._files.append(file)
- def parse(self, s):
- if not s:
- return
-
- s = str(s)
- buf = io.BytesIO(s)
-
- config = ConfigParser()
- config.readfp(buf)
+ def parse(self, f):
+ config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
+ config.read_file(f)
# Read all data from the configuration file in the _config dict.
for section in config.sections():
(Only in debugging mode.)
"""
log.debug(_("Configuration:"))
- for section, settings in self._config.items():
+ for section, settings in list(self._config.items()):
log.debug(" " + _("Section: %s") % section)
- for k, v in settings.items():
+ for k, v in list(settings.items()):
log.debug(" %-20s: %s" % (k, v))
else:
log.debug(" " + _("No settings in this section."))
log.debug(" %s" % f)
def has_distro_conf(self):
- return self._config.has_key("distro")
+ return "distro" in self._config
def get_distro_conf(self):
return self.get_section("distro")
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import os.path
-from errors import *
+from .errors import *
-from __version__ import PAKFIRE_VERSION
+from .__version__ import PAKFIRE_VERSION
PAKFIRE_LEAST_COMPATIBLE_VERSION = PAKFIRE_VERSION
"HOME" : "/root",
"LANG" : "C",
"PATH" : "/usr/bin:/bin:/usr/sbin:/sbin",
- "PS1" : "\u:\w\$ ",
+ "PS1" : "\\u:\\w\\$ ",
"TERM" : "vt100",
}
ORPHAN_DIRECTORIES.append(i)
-ORPHAN_DIRECTORIES.sort(cmp=lambda x,y: cmp(len(x), len(y)), reverse=True)
+ORPHAN_DIRECTORIES.sort(key=lambda x: len(x), reverse=True)
PACKAGE_INFO = """\
# Pakfire %(pakfire_version)s
-#!/usr/bin/python
+#!/usr/bin/python3
import hashlib
import json
import pakfire.util
from pakfire.system import system
-import base
-import transport
+from . import base
+from . import transport
from pakfire.constants import *
from pakfire.i18n import _
try:
return self[key]
except KeyError:
- raise AttributeError, key
+ raise AttributeError(key)
class PakfireDaemon(object):
f.close()
if not job.source_hash_sha512 == h.hexdigest():
- raise DownloadError, "Hash check did not succeed."
+ raise DownloadError("Hash check did not succeed.")
# Create a new instance of a build environment.
build = pakfire.builder.BuildEnviron(p, tmpfile,
self.upload_file(job, file, "package")
- except DependencyError, e:
+ except DependencyError as e:
message = "%s: %s" % (e.__class__.__name__, e)
self.update_state(job, "dependency_error", message)
raise
- except DownloadError, e:
+ except DownloadError as e:
message = "%s: %s" % (e.__class__.__name__, e)
self.update_state(job, "download_error", message)
raise
except (KeyboardInterrupt, SystemExit):
self.update_state(job, "aborted")
- except Exception, e:
+ except Exception as e:
# Format the exception and send it to the server.
message = "%s: %s" % (e.__class__.__name__, e)
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import logging
log = logging.getLogger("pakfire")
-import system
+from . import system
class Distribution(object):
def __init__(self, data=None):
return
# Exceptional handling for arch.
- if config.has_key("arch"):
+ if "arch" in config:
self.arch = config["arch"]
del config["arch"]
def info(self):
info = {}
- for k, v in self.environ.items():
+ for k, v in list(self.environ.items()):
info[k.lower()] = v
return info
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import json
import os
-import pycurl
import random
+import urllib
import logging
log = logging.getLogger("pakfire")
-from config import _Config
-
-import urlgrabber.grabber
-from urlgrabber.grabber import URLGrabber, URLGrabError
-from urlgrabber.mirror import MirrorGroup
-from urlgrabber.progress import TextMeter
+from . import progressbar
from pakfire.constants import *
from pakfire.i18n import _
-class PakfireGrabber(URLGrabber):
- """
- Class to make some modifications on the urlgrabber configuration.
- """
- def __init__(self, pakfire, *args, **kwargs):
- kwargs.update({
- "quote" : 0,
- "user_agent" : "pakfire/%s" % PAKFIRE_VERSION,
-
- "ssl_verify_host" : False,
- "ssl_verify_peer" : False,
- })
-
- if isinstance(pakfire, _Config):
- config = pakfire
- else:
- config = pakfire.config
- self.config = config
-
- # Set throttle setting.
- bandwidth_throttle = config.get("downloader", "bandwidth_throttle")
- if bandwidth_throttle:
- try:
- bandwidth_throttle = int(bandwidth_throttle)
- except ValueError:
- log.error("Configuration value for bandwidth_throttle is invalid.")
- bandwidth_throttle = 0
-
- kwargs.update({ "throttle" : bandwidth_throttle })
+class PakfireDownloader(object):
+ def __init__(self):
+ pass
- # Configure HTTP proxy.
- http_proxy = config.get("downloader", "http_proxy")
- if http_proxy:
- kwargs.update({ "proxies" : { "http" : http_proxy, "https" : http_proxy }})
-
- URLGrabber.__init__(self, *args, **kwargs)
-
- def check_offline_mode(self):
- offline = self.config.get("downloader", "offline")
- if not offline:
- return
-
- raise OfflineModeError
-
- def urlread(self, filename, *args, **kwargs):
- self.check_offline_mode()
-
- # This is for older versions of urlgrabber which are packaged in Debian
- # and Ubuntu and cannot handle filenames as a normal Python string but need
- # a unicode string.
- return URLGrabber.urlread(self, filename.encode("utf-8"), *args, **kwargs)
-
- def urlopen(self, filename, *args, **kwargs):
- self.check_offline_mode()
-
- # This is for older versions of urlgrabber which are packaged in Debian
- # and Ubuntu and cannot handle filenames as a normal Python string but need
- # a unicode string.
- return URLGrabber.urlopen(self, filename.encode("utf-8"), *args, **kwargs)
+ @property
+ def user_agent(self):
+ return "pakfire/%s" % PAKFIRE_VERSION
- def urlgrab(self, url, *args, **kwargs):
- self.check_offline_mode()
+ def set_proxy(self, *args, **kwargs):
+ pass
- # This is for older versions of urlgrabber which are packaged in Debian
- # and Ubuntu and cannot handle filenames as a normal Python string but need
- # a unicode string.
- return URLGrabber.urlgrab(self, url.encode("utf-8"), *args, **kwargs)
+ def use_mirrors(self, mirrors):
+ pass
class PackageDownloader(PakfireGrabber):
log.info(_("Downloading source files:"))
if self.pakfire.offline:
- raise OfflineModeError, _("Cannot download source code in offline mode.")
+ raise OfflineModeError(_("Cannot download source code in offline mode."))
# Create source download directory.
if not os.path.exists(SOURCE_CACHE_DIR):
for filename in download_files:
try:
self.grabber.urlgrab(os.path.basename(filename), filename=filename)
- except URLGrabError, e:
+ except URLGrabError as e:
# Remove partly downloaded file.
try:
os.unlink(filename)
except OSError:
pass
- raise DownloadError, "%s %s" % (os.path.basename(filename), e)
+ raise DownloadError("%s %s" % (os.path.basename(filename), e))
# Check if the downloaded file was empty.
if os.path.getsize(filename) == 0:
# Remove the file and raise an error.
os.unlink(filename)
- raise DownloadError, _("Downloaded empty file: %s") \
- % os.path.basename(filename)
+ raise DownloadError(_("Downloaded empty file: %s") \
+ % os.path.basename(filename))
log.info("")
try:
mirrordata = g.urlread(self.mirrorlist, limit=MIRRORLIST_MAXSIZE)
- except URLGrabError, e:
+ except URLGrabError as e:
log.warning("Could not update the mirrorlist for repo '%s': %s" % (self.repo.name, e))
return
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-from i18n import _
+from .i18n import _
class commandTimeoutExpired(Exception):
pass # XXX cannot be as is
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
if len(parts) == 0: return ""
if len(parts) == 1: return parts[0]
return _("%(commas)s and %(last)s") % {
- "commas": u", ".join(parts[:-1]),
+ "commas": ", ".join(parts[:-1]),
"last": parts[len(parts) - 1],
}
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import logging
log = logging.getLogger("pakfire")
-from constants import *
-from i18n import _
-from system import system
+from .constants import *
+from .i18n import _
+from .system import system
class Keyring(object):
def __init__(self, pakfire):
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
+++ /dev/null
-"""Interface to the liblzma compression library.
-
-This module provides a class for reading and writing compressed files,
-classes for incremental (de)compression, and convenience functions for
-one-shot (de)compression.
-
-These classes and functions support both the XZ and legacy LZMA
-container formats, as well as raw compressed data streams.
-"""
-
-__all__ = [
- "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256",
- "CHECK_ID_MAX", "CHECK_UNKNOWN",
- "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64",
- "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC",
- "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW",
- "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4",
- "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME",
-
- "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError",
- "compress", "decompress", "check_is_supported",
-]
-
-import io
-from _lzma import *
-
-
-_MODE_CLOSED = 0
-_MODE_READ = 1
-_MODE_READ_EOF = 2
-_MODE_WRITE = 3
-
-_BUFFER_SIZE = 8192
-
-
-class LZMAFile(io.BufferedIOBase):
-
- """A file object providing transparent LZMA (de)compression.
-
- An LZMAFile can act as a wrapper for an existing file object, or
- refer directly to a named file on disk.
-
- Note that LZMAFile provides a *binary* file interface - data read
- is returned as bytes, and data to be written must be given as bytes.
- """
-
- def __init__(self, filename=None, mode="r",
- fileobj=None, format=None, check=-1,
- preset=None, filters=None):
- """Open an LZMA-compressed file.
-
- If filename is given, open the named file. Otherwise, operate on
- the file object given by fileobj. Exactly one of these two
- parameters should be provided.
-
- mode can be "r" for reading (default), "w" for (over)writing, or
- "a" for appending.
-
- format specifies the container format to use for the file.
- If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the
- default is FORMAT_XZ.
-
- check specifies the integrity check to use. This argument can
- only be used when opening a file for writing. For FORMAT_XZ,
- the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not
- support integrity checks - for these formats, check must be
- omitted, or be CHECK_NONE.
-
- When opening a file for reading, the *preset* argument is not
- meaningful, and should be omitted. The *filters* argument should
- also be omitted, except when format is FORMAT_RAW (in which case
- it is required).
-
- When opening a file for writing, the settings used by the
- compressor can be specified either as a preset compression
- level (with the *preset* argument), or in detail as a custom
- filter chain (with the *filters* argument). For FORMAT_XZ and
- FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset
- level. For FORMAT_RAW, the caller must always specify a filter
- chain; the raw compressor does not support preset compression
- levels.
-
- preset (if provided) should be an integer in the range 0-9,
- optionally OR-ed with the constant PRESET_EXTREME.
-
- filters (if provided) should be a sequence of dicts. Each dict
- should have an entry for "id" indicating ID of the filter, plus
- additional entries for options to the filter.
- """
- self._fp = None
- self._closefp = False
- self._mode = _MODE_CLOSED
- self._pos = 0
- self._size = -1
-
- if mode == "r":
- if check != -1:
- raise ValueError("Cannot specify an integrity check "
- "when opening a file for reading")
- if preset is not None:
- raise ValueError("Cannot specify a preset compression "
- "level when opening a file for reading")
- if format is None:
- format = FORMAT_AUTO
- mode_code = _MODE_READ
- # Save the args to pass to the LZMADecompressor initializer.
- # If the file contains multiple compressed streams, each
- # stream will need a separate decompressor object.
- self._init_args = {"format":format, "filters":filters}
- self._decompressor = LZMADecompressor(**self._init_args)
- self._buffer = None
- elif mode in ("w", "a"):
- if format is None:
- format = FORMAT_XZ
- mode_code = _MODE_WRITE
- self._compressor = LZMACompressor(format=format, check=check,
- preset=preset, filters=filters)
- else:
- raise ValueError("Invalid mode: {!r}".format(mode))
-
- if filename is not None and fileobj is None:
- mode += "b"
- self._fp = open(filename, mode)
- self._closefp = True
- self._mode = mode_code
- elif fileobj is not None and filename is None:
- self._fp = fileobj
- self._mode = mode_code
- else:
- raise ValueError("Must give exactly one of filename and fileobj")
-
- def close(self):
- """Flush and close the file.
-
- May be called more than once without error. Once the file is
- closed, any other operation on it will raise a ValueError.
- """
- if self._mode == _MODE_CLOSED:
- return
- try:
- if self._mode in (_MODE_READ, _MODE_READ_EOF):
- self._decompressor = None
- self._buffer = None
- elif self._mode == _MODE_WRITE:
- self._fp.write(self._compressor.flush())
- self._compressor = None
- finally:
- try:
- if self._closefp:
- self._fp.close()
- finally:
- self._fp = None
- self._closefp = False
- self._mode = _MODE_CLOSED
-
- @property
- def closed(self):
- """True if this file is closed."""
- return self._mode == _MODE_CLOSED
-
- def fileno(self):
- """Return the file descriptor for the underlying file."""
- self._check_not_closed()
- return self._fp.fileno()
-
- def seekable(self):
- """Return whether the file supports seeking."""
- return self.readable()
-
- def readable(self):
- """Return whether the file was opened for reading."""
- self._check_not_closed()
- return self._mode in (_MODE_READ, _MODE_READ_EOF)
-
- def writable(self):
- """Return whether the file was opened for writing."""
- self._check_not_closed()
- return self._mode == _MODE_WRITE
-
- # Mode-checking helper functions.
-
- def _check_not_closed(self):
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- def _check_can_read(self):
- if not self.readable():
- raise io.UnsupportedOperation("File not open for reading")
-
- def _check_can_write(self):
- if not self.writable():
- raise io.UnsupportedOperation("File not open for writing")
-
- def _check_can_seek(self):
- if not self.seekable():
- raise io.UnsupportedOperation("Seeking is only supported "
- "on files open for reading")
-
- # Fill the readahead buffer if it is empty. Returns False on EOF.
- def _fill_buffer(self):
- if self._buffer:
- return True
-
- if self._decompressor.unused_data:
- rawblock = self._decompressor.unused_data
- else:
- rawblock = self._fp.read(_BUFFER_SIZE)
-
- if not rawblock:
- if self._decompressor.eof:
- self._mode = _MODE_READ_EOF
- self._size = self._pos
- return False
- else:
- raise EOFError("Compressed file ended before the "
- "end-of-stream marker was reached")
-
- # Continue to next stream.
- if self._decompressor.eof:
- self._decompressor = LZMADecompressor(**self._init_args)
-
- self._buffer = self._decompressor.decompress(rawblock)
- return True
-
- # Read data until EOF.
- # If return_data is false, consume the data without returning it.
- def _read_all(self, return_data=True):
- blocks = []
- while self._fill_buffer():
- if return_data:
- blocks.append(self._buffer)
- self._pos += len(self._buffer)
- self._buffer = None
- if return_data:
- return b"".join(blocks)
-
- # Read a block of up to n bytes.
- # If return_data is false, consume the data without returning it.
- def _read_block(self, n, return_data=True):
- blocks = []
- while n > 0 and self._fill_buffer():
- if n < len(self._buffer):
- data = self._buffer[:n]
- self._buffer = self._buffer[n:]
- else:
- data = self._buffer
- self._buffer = None
- if return_data:
- blocks.append(data)
- self._pos += len(data)
- n -= len(data)
- if return_data:
- return b"".join(blocks)
-
- def peek(self, size=-1):
- """Return buffered data without advancing the file position.
-
- Always returns at least one byte of data, unless at EOF.
- The exact number of bytes returned is unspecified.
- """
- self._check_can_read()
- if self._mode == _MODE_READ_EOF or not self._fill_buffer():
- return b""
- return self._buffer
-
- def read(self, size=-1):
- """Read up to size uncompressed bytes from the file.
-
- If size is negative or omitted, read until EOF is reached.
- Returns b"" if the file is already at EOF.
- """
- self._check_can_read()
- if self._mode == _MODE_READ_EOF or size == 0:
- return b""
- elif size < 0:
- return self._read_all()
- else:
- return self._read_block(size)
-
- def read1(self, size=-1):
- """Read up to size uncompressed bytes with at most one read
- from the underlying stream.
-
- Returns b"" if the file is at EOF.
- """
- self._check_can_read()
- if (size == 0 or self._mode == _MODE_READ_EOF or
- not self._fill_buffer()):
- return b""
- if 0 < size < len(self._buffer):
- data = self._buffer[:size]
- self._buffer = self._buffer[size:]
- else:
- data = self._buffer
- self._buffer = None
- self._pos += len(data)
- return data
-
- def write(self, data):
- """Write a bytes object to the file.
-
- Returns the number of uncompressed bytes written, which is
- always len(data). Note that due to buffering, the file on disk
- may not reflect the data written until close() is called.
- """
- self._check_can_write()
- compressed = self._compressor.compress(data)
- self._fp.write(compressed)
- self._pos += len(data)
- return len(data)
-
- # Rewind the file to the beginning of the data stream.
- def _rewind(self):
- self._fp.seek(0, 0)
- self._mode = _MODE_READ
- self._pos = 0
- self._decompressor = LZMADecompressor(**self._init_args)
- self._buffer = None
-
- def seek(self, offset, whence=0):
- """Change the file position.
-
- The new position is specified by offset, relative to the
- position indicated by whence. Possible values for whence are:
-
- 0: start of stream (default): offset must not be negative
- 1: current stream position
- 2: end of stream; offset must not be positive
-
- Returns the new file position.
-
- Note that seeking is emulated, sp depending on the parameters,
- this operation may be extremely slow.
- """
- self._check_can_seek()
-
- # Recalculate offset as an absolute file position.
- if whence == 0:
- pass
- elif whence == 1:
- offset = self._pos + offset
- elif whence == 2:
- # Seeking relative to EOF - we need to know the file's size.
- if self._size < 0:
- self._read_all(return_data=False)
- offset = self._size + offset
- else:
- raise ValueError("Invalid value for whence: {}".format(whence))
-
- # Make it so that offset is the number of bytes to skip forward.
- if offset < self._pos:
- self._rewind()
- else:
- offset -= self._pos
-
- # Read and discard data until we reach the desired position.
- if self._mode != _MODE_READ_EOF:
- self._read_block(offset, return_data=False)
-
- return self._pos
-
- def tell(self):
- """Return the current file position."""
- self._check_not_closed()
- return self._pos
-
-
-def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None):
- """Compress a block of data.
-
- Refer to LZMACompressor's docstring for a description of the
- optional arguments *format*, *check*, *preset* and *filters*.
-
- For incremental compression, use an LZMACompressor object instead.
- """
- comp = LZMACompressor(format, check, preset, filters)
- return comp.compress(data) + comp.flush()
-
-
-def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
- """Decompress a block of data.
-
- Refer to LZMADecompressor's docstring for a description of the
- optional arguments *format*, *check* and *filters*.
-
- For incremental decompression, use a LZMADecompressor object instead.
- """
- results = []
- while True:
- decomp = LZMADecompressor(format, memlimit, filters)
- results.append(decomp.decompress(data))
- if not decomp.eof:
- raise LZMAError("Compressed data ended before the "
- "end-of-stream marker was reached")
- if not decomp.unused_data:
- return b"".join(results)
- # There is unused data left over. Proceed to next stream.
- data = decomp.unused_data
import tarfile
-from base import Package
-from file import BinaryPackage, FilePackage, SourcePackage
-from installed import DatabasePackage, InstalledPackage
-from solv import SolvPackage
+from . import file
-from make import Makefile
+from .base import Package
+from .installed import DatabasePackage, InstalledPackage
+from .solv import SolvPackage
+
+from .make import Makefile
from pakfire.constants import *
# Simply check if the given file is a tarfile.
if tarfile.is_tarfile(filename):
if filename.endswith(".src.%s" % PACKAGE_EXTENSION):
- return SourcePackage(pakfire, repo, filename)
+ return file.SourcePackage(pakfire, repo, filename)
- return BinaryPackage(pakfire, repo, filename)
+ return file.BinaryPackage(pakfire, repo, filename)
elif filename.endswith(".%s" % MAKEFILE_EXTENSION):
return Makefile(pakfire, filename)
@property
def metadata(self):
- raise NotImplementedError, self
+ raise NotImplementedError(self)
@property
def friendly_name(self):
return []
def extract(self, path, prefix=None):
- raise NotImplementedError, "%s" % repr(self)
+ raise NotImplementedError("%s" % repr(self))
def remove(self, message=None, prefix=None):
# Make two filelists. One contains all binary files that need to be
try:
shutil.move(file, file_save)
- except shutil.Error, e:
- print e
+ except shutil.Error as e:
+ print(e)
if prefix:
file_save = os.path.relpath(file_save, prefix)
###############################################################################
import hashlib
+import lzma
import os
import re
import shutil
import logging
log = logging.getLogger("pakfire")
-import pakfire.filelist
-import pakfire.lzma as lzma
-import pakfire.util as util
-import pakfire.compress as compress
-from pakfire.constants import *
-from pakfire.i18n import _
+from ..constants import *
+from ..i18n import _
-import base
-import lexer
-import make
-import tar
+from .. import compress
+from .. import filelist
+
+from . import base
+from . import lexer
+from . import make
+from . import tar
class FilePackage(base.Package):
"""
pass
else:
- raise PackageFormatUnsupportedError, _("Filename: %s") % self.filename
+ raise PackageFormatUnsupportedError(_("Filename: %s") % self.filename)
def check(self):
"""
can be opened.
"""
if not tarfile.is_tarfile(self.filename):
- raise FileError, "Given file is not of correct format: %s" % self.filename
+ raise FileError("Given file is not of correct format: %s" % self.filename)
assert self.format in PACKAGE_FORMATS_SUPPORTED, self.format
payload_archive = tar.InnerTarFile.open(fileobj=payload)
else:
- raise Exception, "Unhandled payload compression type: %s" % \
- self.payload_compression
+ raise Exception("Unhandled payload compression type: %s" % \
+ self.payload_compression)
return payload_archive
pb = None
if message:
message = "%-10s : %s" % (message, self.friendly_name)
+
+ from . import util
pb = util.make_progress(message, len(self.filelist), eta=False)
# Collect messages with errors and warnings, that are passed to
i = 0
while True:
- member = payload_archive.next()
+ member = payload_archive.next()
if not member:
break
# Search for filename.
while True:
- member = payload_archive.next()
+ member = payload_archive.next()
if not member:
break
a.close()
sigs = []
- for signature in self.signatures.values():
+ for signature in list(self.signatures.values()):
sigs += self.pakfire.keyring.verify(signature, chksums)
# Open the archive to access all files we will need.
f.close()
a.close()
- for filename, chksum in chksums.items():
+ for filename, chksum in list(chksums.items()):
ret = self.check_chksum(filename, chksum)
if ret:
else:
log.debug("Checksum of %s does not match." % filename)
- raise Exception, "Checksum does not match: %s" % filename
+ raise Exception("Checksum does not match: %s" % filename)
return sigs
"""
Calculate the hash1 of this package.
"""
+ from . import util
return util.calc_hash1(self.filename)
@property
import pakfire.downloader
import pakfire.filelist
-from base import Package
-from file import BinaryPackage
+from .base import Package
+from .file import BinaryPackage
import pakfire.util as util
from pakfire.constants import *
self._data = {}
self._filelist = None
- for key in data.keys():
+ for key in list(data.keys()):
self._data[key] = data[key]
def __repr__(self):
# Verify if the download was okay.
if not cache.verify(cache_filename, self.hash1):
- raise Exception, "XXX this should never happen..."
+ raise Exception("XXX this should never happen...")
filename = os.path.join(cache.path, cache_filename)
return BinaryPackage(self.pakfire, self.repo, filename)
break
if not found:
- raise LexerUnhandledLine, "%d: %s" % (self.lineno, line)
+ raise LexerUnhandledLine("%d: %s" % (self.lineno, line))
def read_block(self, pattern_start=None, pattern_line=None, pattern_end=None,
raw=False):
continue
if not line.startswith(LEXER_BLOCK_LINE_INDENT):
- raise LexerError, "Line has not the right indentation: %d: %s" \
- % (self.lineno, line)
+ raise LexerError("Line has not the right indentation: %d: %s" \
+ % (self.lineno, line))
- raise LexerUnhandledLine, "%d: %s" % (self.lineno, line)
+ raise LexerUnhandledLine("%d: %s" % (self.lineno, line))
return (groups, lines)
if not line:
return
- raise LexerUnhandledLine, "%d: %s" % (self.lineno, line)
+ raise LexerUnhandledLine("%d: %s" % (self.lineno, line))
DEP_DEFINITIONS = ("prerequires", "requires", "provides", "conflicts", "obsoletes", "recommends", "suggests")
m = re.match(pattern, line)
if not m:
- raise LexerError, "Not a definition: %s" % line
+ raise LexerError("Not a definition: %s" % line)
# Line was correctly parsed, can go on.
self._lineno += 1
m = re.match(LEXER_DEFINE_BEGIN, line)
if not m:
- raise Exception, "XXX not a define"
+ raise Exception("XXX not a define")
# Check content of next line.
found = None
if found is None:
line = self.get_line(self._lineno)
- raise LexerUnhandledLine, "%d: %s" % (self.lineno, line)
+ raise LexerUnhandledLine("%d: %s" % (self.lineno, line))
# Go in to next line.
self._lineno += 1
value.append("")
continue
- raise LexerError, "Unhandled line: %s" % line
+ raise LexerError("Unhandled line: %s" % line)
self._definitions[key] = "\n".join(value)
found = True
if not found:
- raise LexerError, "No valid begin of if statement: %d: %s" \
- % (self.lineno, line)
+ raise LexerError("No valid begin of if statement: %d: %s" \
+ % (self.lineno, line))
self._lineno += 1
clause = m.groups()
lines.append("")
continue
- raise LexerUnhandledLine, "%d: %s" % (self.lineno, line)
+ raise LexerUnhandledLine("%d: %s" % (self.lineno, line))
if not block_closed:
- raise LexerError, "Unclosed if block"
+ raise LexerError("Unclosed if block")
return (clause, lines)
line = self.get_line(self._lineno)
m = re.match(LEXER_IF_END, line)
if not m:
- raise LexerError, "Unclosed if clause"
+ raise LexerError("Unclosed if clause")
self._lineno += 1
elif op == "!=":
val = not a == b
else:
- raise LexerError, "Unknown operator: %s" % op
+ raise LexerError("Unknown operator: %s" % op)
else:
# Else is always true.
m = re.match(LEXER_SCRIPTLET_BEGIN, line)
if not m:
- raise Exception, "Not a scriptlet"
+ raise Exception("Not a scriptlet")
self._lineno += 1
name = m.group(1)
# check if scriptlet was already defined.
- if self.scriptlets.has_key(name):
- raise Exception, "Scriptlet %s is already defined" % name
+ if name in self.scriptlets:
+ raise Exception("Scriptlet %s is already defined" % name)
lang = m.group(2) or "shell"
lines = [
self._lineno += 1
continue
- raise LexerUnhandledLine, "%d: %s" % (self.lineno, line)
+ raise LexerUnhandledLine("%d: %s" % (self.lineno, line))
self.scriptlets[name] = {
"lang" : lang,
m = re.match(LEXER_PACKAGE_INHERIT, line)
if not m:
- raise LexerError, "Not a template inheritance: %s" % line
+ raise LexerError("Not a template inheritance: %s" % line)
self._lineno += 1
# Import all environment variables.
if environ:
- for k, v in environ.items():
+ for k, v in list(environ.items()):
self._definitions[k] = v
self.exports.append(k)
m = re.match(LEXER_BUILD_BEGIN, line)
if not m:
- raise LexerError, "Not a build statement: %s" % line
+ raise LexerError("Not a build statement: %s" % line)
self._lineno += 1
m = re.match(LEXER_INCLUDE, line)
if not m:
- raise LexerError, "Not an include statement: %s" % line
+ raise LexerError("Not an include statement: %s" % line)
# Get the filename from the line.
file = m.group(1)
# Copy all templates and packages but make sure
# to update the parent lexer (for accessing each other).
- for name, template in other.templates.items():
+ for name, template in list(other.templates.items()):
template.parent = self
self._templates[name] = template
m = re.match(LEXER_TEMPLATE_BEGIN, line)
if not m:
- raise Exception, "Not a template"
+ raise Exception("Not a template")
# Line was correctly parsed, can go on.
self._lineno += 1
m = re.match(LEXER_PACKAGE_BEGIN, line)
if not m:
- raise Exception, "Not a package: %s" %line
+ raise Exception("Not a package: %s" %line)
self._lineno += 1
m = re.match(LEXER_VALID_PACKAGE_NAME, name)
if not m:
- raise LexerError, "Invalid package name: %s" % name
+ raise LexerError("Invalid package name: %s" % name)
lines = ["_name = %s" % name]
# If there is an unhandled line in a block, we raise an error.
if opened:
- raise Exception, "XXX unhandled line in package block: %s" % line
+ raise Exception("XXX unhandled line in package block: %s" % line)
# If the block was never opened, we just go on.
else:
break
if opened:
- raise LexerError, "Unclosed package block '%s'." % name
+ raise LexerError("Unclosed package block '%s'." % name)
package = PackageLexer(lines, parent=self)
self.packages.append(package)
import tempfile
import uuid
-from urlgrabber.grabber import URLGrabber, URLGrabError
-from urlgrabber.progress import TextMeter
-
-import lexer
-import packager
+from . import lexer
+from . import packager
import logging
log = logging.getLogger("pakfire")
-import pakfire.downloader as downloader
-import pakfire.util as util
+from ..constants import *
+from ..i18n import _
-from base import Package
+from .. import downloader
+from .. import system
+from .. import util
-from pakfire.constants import *
-from pakfire.i18n import _
-from pakfire.system import system
+from .base import Package
class MakefileBase(Package):
def __init__(self, pakfire, filename=None, lines=None):
# Update environment.
environ = self.pakfire.distro.environ
environ.update({
- "PARALLELISMFLAGS" : "-j%d" % system.parallelism,
+ "PARALLELISMFLAGS" : "-j%d" % system.system.parallelism,
})
if filename:
import logging
log = logging.getLogger("pakfire")
-import pakfire.lzma as lzma
import pakfire.util as util
from pakfire.constants import *
from pakfire.i18n import _
-import file
-import tar
+from . import tar
class Packager(object):
payload_compression = None
datafile = tar.InnerTarFile.open(datafile)
while True:
- m = datafile.next()
+ m = next(datafile)
if not m:
break
t = tar.InnerTarFile.open(datafile)
while True:
- m = t.next()
+ m = t.next()
if not m:
break
try:
f = open(path, "b")
except OSError:
- raise Exception, "Cannot open script file: %s" % lang["path"]
+ raise Exception("Cannot open script file: %s" % lang["path"])
s = open(scriptlet_file, "wb")
s.close()
else:
- raise Exception, "Unknown scriptlet language: %s" % scriptlet["lang"]
+ raise Exception("Unknown scriptlet language: %s" % scriptlet["lang"])
scriptlets.append((scriptlet_name, scriptlet_file))
except OSError:
shutil.copy2(tempfile, resultfile)
+ # XXX to resolve a cyclic dependency
+ from . import file
+
return file.BinaryPackage(self.pakfire, self.pakfire.repos.dummy, resultfile)
import os
import re
-import base
-import file
+from . import base
+from . import file
class SolvPackage(base.Package):
def __init__(self, pakfire, solvable, repo=None):
# #
###############################################################################
+import lzma
import os
import tarfile
import logging
log = logging.getLogger("pakfire")
-import pakfire.lzma as lzma
import pakfire.util as util
from pakfire.constants import *
from pakfire.i18n import _
# Extract file the normal way...
try:
tarfile.TarFile.extract(self, member, path)
- except OSError, e:
+ except OSError as e:
log.warning(_("Could not extract file: /%(src)s - %(dst)s") \
% { "src" : member.name, "dst" : e, })
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-from __future__ import division
+
import datetime
import fcntl
import termios
import time
-import util
+from . import util
-from i18n import _
+from .i18n import _
DEFAULT_VALUE_MAX = 100
DEFAULT_TERM_WIDTH = 80
import logging
log = logging.getLogger("pakfire")
-import pakfire.packages as packages
+from .. import packages
-from pakfire.i18n import _
+from .base import RepositoryDummy
+from .local import RepositoryDir, RepositoryBuild
+from .remote import RepositoryRemote
+from .system import RepositorySystem
-from base import RepositoryDummy
-from local import RepositoryDir, RepositoryBuild
-from remote import RepositoryRemote
-from system import RepositorySystem
+from ..i18n import _
class Repositories(object):
"""
self.__repos = {}
# Create a dummy repository
- self.dummy = RepositoryDummy(self.pakfire)
+ from . import base
+ self.dummy = base.RepositoryDummy(self.pakfire)
# Create the local repository.
self.local = RepositorySystem(self.pakfire)
self._parse(repo_name, repo_args)
def __iter__(self):
- repositories = self.__repos.values()
+ repositories = list(self.__repos.values())
repositories.sort()
return iter(repositories)
"arch" : self.distro.arch,
}
- for k, v in _args.items():
+ for k, v in list(_args.items()):
# Skip all non-strings.
if not type(v) == type("a"):
continue
self.add_repo(repo)
def add_repo(self, repo):
- if self.__repos.has_key(repo.name):
- raise Exception, "Repository with that name does already exist: %s" % repo.name
+ if repo.name in self.__repos:
+ raise Exception("Repository with that name does already exist: %s" % repo.name)
self.__repos[repo.name] = repo
import logging
log = logging.getLogger("pakfire")
-import index
+from .. import satsolver
-import pakfire.packages as packages
-import pakfire.satsolver as satsolver
+from . import index
+from .. import packages
class RepositoryFactory(object):
def __init__(self, pakfire, name, description):
dumps = []
# Dump all package information of the packages in this repository.
for pkg in self:
- dump = pkg.dump(long=long, filelist=filelist)
+ dump = pkg.dump(int=int, filelist=filelist)
dumps.append(dump)
return "\n\n".join(dumps)
# Check if we actually can open the database.
if not self.format in DATABASE_FORMATS_SUPPORTED:
- raise DatabaseFormatError, _("The format of the database is not supported by this version of pakfire.")
+ raise DatabaseFormatError(_("The format of the database is not supported by this version of pakfire."))
def __len__(self):
count = 0
# Check if database version is supported.
if self.format > DATABASE_FORMAT:
- raise DatabaseError, _("Cannot use database with version greater than %s.") % DATABASE_FORMAT
+ raise DatabaseError(_("Cannot use database with version greater than %s.") % DATABASE_FORMAT)
log.info(_("Migrating database from format %(old)s to %(new)s.") % \
{ "old" : self.format, "new" : DATABASE_FORMAT })
import logging
log = logging.getLogger("pakfire")
-import pakfire.packages as packages
-import pakfire.satsolver as satsolver
+from .. import packages
class Index(object):
"""
def add_package(self, pkg):
log.debug("Adding package to index %s: %s" % (self, pkg))
+ from .. import satsolver
solvable = satsolver.Solvable(self.solver_repo, pkg.name,
pkg.friendly_version, pkg.arch)
import os
import shutil
import tempfile
-import urlgrabber
import logging
log = logging.getLogger("pakfire")
-import base
-import metadata
+from . import base
+from . import metadata
import pakfire.compress as compress
import pakfire.downloader as downloader
###############################################################################
import os
-import urlgrabber
import logging
log = logging.getLogger("pakfire")
-import base
-import cache
-import metadata
+from . import base
+from . import cache
+from . import metadata
import pakfire.compress as compress
import pakfire.downloader as downloader
"http://" : 75,
}
- for url, prio in url2priority.items():
+ for url, prio in list(url2priority.items()):
if self.baseurl.startswith(url):
priority = prio
break
exists = self.cache.exists(cache_filename)
if not exists and offline:
- raise OfflineModeError, _("No metadata available for repository %s. Cannot download any.") \
- % self.name
+ raise OfflineModeError(_("No metadata available for repository %s. Cannot download any.") \
+ % self.name)
elif exists and offline:
# Repository metadata exists. We cannot update anything because of the offline mode.
while True:
try:
data = grabber.urlread(filename, limit=METADATA_DOWNLOAD_LIMIT)
- except urlgrabber.grabber.URLGrabError, e:
+ except urlgrabber.grabber.URLGrabError as e:
if e.errno == 256:
- raise DownloadError, _("Could not update metadata for %s from any mirror server") % self.name
+ raise DownloadError(_("Could not update metadata for %s from any mirror server") % self.name)
grabber.increment_mirror(grabber)
continue
# Raise an exception when we are running in offline mode but an update is required.
if force and offline:
- raise OfflineModeError, _("Cannot download package database for %s in offline mode.") % self.name
+ raise OfflineModeError(_("Cannot download package database for %s in offline mode.") % self.name)
elif not force:
return
# If we are in offline mode, we cannot download any files.
if self.pakfire.offline and not self.baseurl.startswith("file://"):
- raise OfflineModeError, _("Cannot download this file in offline mode: %s") \
- % filename
+ raise OfflineModeError(_("Cannot download this file in offline mode: %s") \
+ % filename)
try:
i = grabber.urlopen(filename)
- except urlgrabber.grabber.URLGrabError, e:
- raise DownloadError, _("Could not download %s: %s") % (filename, e)
+ except urlgrabber.grabber.URLGrabError as e:
+ raise DownloadError(_("Could not download %s: %s") % (filename, e))
# Open input and output files and download the file.
o = self.cache.open(cache_filename, "w")
import os
-import base
-import database
+from . import base
+from . import database
import pakfire.packages as packages
import pakfire.util as util
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import logging
log = logging.getLogger("pakfire")
-import filelist
-import packages
-import transaction
-import util
-import _pakfire
+from . import _pakfire
+from . import filelist
+from . import packages
+from . import transaction
+from . import util
-from constants import *
-from i18n import _
+from .constants import *
+from .i18n import _
# Put some variables into our own namespace, to make them easily accessible
# for code, that imports the satsolver module.
if solver.status:
return solver
- raise DependencyError, solver.get_problem_string()
+ raise DependencyError(solver.get_problem_string())
def solve(self, request, interactive=False, logger=None, force_best=False, **kwargs):
# XXX implement interactive
solver = Solver(self, request, logger=logger)
# Apply configuration to solver.
- for key, val in kwargs.items():
+ for key, val in list(kwargs.items()):
solver.set(key, val)
# Do the solving.
self.install_name(what)
return
- raise Exception, "Unknown type"
+ raise Exception("Unknown type")
def remove(self, what):
if isinstance(what, Solvable):
self.remove_name(what)
return
- raise Exception, "Unknown type"
+ raise Exception("Unknown type")
def update(self, what):
if isinstance(what, Solvable):
self.update_name(what)
return
- raise Exception, "Unknown type"
+ raise Exception("Unknown type")
def lock(self, what):
if isinstance(what, Solvable):
self.lock_name(what)
return
- raise Exception, "Unknown type"
+ raise Exception("Unknown type")
def noobsoletes(self, what):
if isinstance(what, Solvable):
self.noobsoletes_name(what)
return
- raise Exception, "Unknown type"
+ raise Exception("Unknown type")
class Solver(object):
try:
flag = self.option2flag[option]
except KeyError:
- raise Exception, "Unknown configuration setting: %s" % option
+ raise Exception("Unknown configuration setting: %s" % option)
self.solver.set_flag(flag, value)
def get(self, option):
try:
flag = self.option2flag[option]
except KeyError:
- raise Exception, "Unknown configuration setting: %s" % option
+ raise Exception("Unknown configuration setting: %s" % option)
return self.solver.get_flag(flag)
def solve(self, force_best=False):
if self.status:
self.logger.info(_("Dependency solving finished in %.2f ms") % (self.time * 1000))
else:
- raise DependencyError, self.get_problem_string()
+ raise DependencyError(self.get_problem_string())
@property
def problems(self):
if not util.ask_user(_("Do you want to manually alter the request?")):
return False
- print _("You can now try to satisfy the solver by modifying your request.")
+ print(_("You can now try to satisfy the solver by modifying your request."))
altered = False
while True:
if len(problems) > 1:
- print _("Which problem to you want to resolve?")
+ print(_("Which problem to you want to resolve?"))
if altered:
- print _("Press enter to try to re-solve the request.")
- print "[1-%s]:" % len(problems),
+ print(_("Press enter to try to re-solve the request."))
+ print("[1-%s]:" % len(problems), end=' ')
- answer = raw_input()
+ answer = input()
# If the user did not enter anything, we abort immediately.
if not answer:
if len(solutions) == 1:
solution = solutions[0]
- print _(" Solution: %s") % solution
- print
+ print(_(" Solution: %s") % solution)
+ print()
if util.ask_user("Do you accept the solution above?"):
altered = True
- print "XXX do something"
+ print("XXX do something")
continue
else:
- print _(" Solutions:")
+ print(_(" Solutions:"))
i = 0
for solution in solutions:
i += 1
- print " #%d: %s" % (i, solution)
+ print(" #%d: %s" % (i, solution))
- print
+ print()
if not altered:
return False
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import subprocess
import tempfile
import time
-import xmlrpclib
+import xmlrpc.client
import logging
log = logging.getLogger("pakfire")
pakfire.api.dist(pkgs, resultdirs=[tmpdir,], **pakfire_args)
# Create a kind of dummy repository to link the packages against it.
- if pakfire_args.has_key("build_id"):
+ if "build_id" in pakfire_args:
del pakfire_args["build_id"]
pakfire_args["mode"] = "server"
return self.update_files(_files)
-class XMLRPCTransport(xmlrpclib.Transport):
+class XMLRPCTransport(xmlrpc.client.Transport):
user_agent = "pakfire/%s" % PAKFIRE_VERSION
def single_request(self, *args, **kwargs):
while tries:
try:
- ret = xmlrpclib.Transport.single_request(self, *args, **kwargs)
+ ret = xmlrpc.client.Transport.single_request(self, *args, **kwargs)
- except socket.error, e:
+ except socket.error as e:
# These kinds of errors are not fatal, but they can happen on
# a bad internet connection or whatever.
# 32 Broken pipe
if not e.errno in (32, 110, 111,):
raise
- except xmlrpclib.ProtocolError, e:
+ except xmlrpc.client.ProtocolError as e:
# Log all XMLRPC protocol errors.
log.error("XMLRPC protocol error:")
log.error(" URL: %s" % e.url)
log.error(" HTTP headers:")
- for header in e.headers.items():
+ for header in list(e.headers.items()):
log.error(" %s: %s" % header)
log.error(" Error code: %s" % e.errcode)
log.error(" Error message: %s" % e.errmsg)
else:
log.error("Maximum number of tries was reached. Giving up.")
# XXX need better exception here.
- raise Exception, "Could not fulfill request."
+ raise Exception("Could not fulfill request.")
return ret
-class ServerProxy(xmlrpclib.ServerProxy):
+class ServerProxy(xmlrpc.client.ServerProxy):
def __init__(self, server, *args, **kwargs):
# Some default settings.
- if not kwargs.has_key("transport"):
+ if "transport" not in kwargs:
kwargs["transport"] = XMLRPCTransport()
kwargs["allow_none"] = True
- xmlrpclib.ServerProxy.__init__(self, server, *args, **kwargs)
+ xmlrpc.client.ServerProxy.__init__(self, server, *args, **kwargs)
class Server(object):
log.info("Uploading chunk %s/%s of %s." % (chunk, chunks,
os.path.basename(filename)))
- data = xmlrpclib.Binary(data)
+ data = xmlrpc.client.Binary(data)
self.conn.upload_chunk(upload_id, data)
# Tell the server, that we finished the upload.
# If the server sends false, something happened with the upload that
# could not be recovered.
if not ret:
- raise Exception, "Upload failed."
+ raise Exception("Upload failed.")
def update_build_status(self, build_id, status, message=""):
ret = self.conn.update_build_state(build_id, status, message)
# If the server returns False, then it did not acknowledge our status
# update and the build has to be aborted.
if not ret:
- raise BuildAbortedException, "The build was aborted by the master server."
+ raise BuildAbortedException("The build was aborted by the master server.")
def build_job(self, type=None):
build = self.conn.build_job() # XXX type=None
try:
func = job_types[build_type]
except KeyError:
- raise Exception, "Build type not supported: %s" % type
+ raise Exception("Build type not supported: %s" % type)
# Call the function that processes the build and try to catch general
# exceptions and report them to the server.
# This has already been reported by func.
raise
- except Exception, e:
+ except Exception as e:
# Format the exception and send it to the server.
message = "%s: %s" % (e.__class__.__name__, e)
# Check if the download checksum matches.
if pakfire.util.calc_hash1(tmpfile) == hash1:
- print "Checksum matches: %s" % hash1
+ print("Checksum matches: %s" % hash1)
else:
- raise DownloadError, "Download was corrupted"
+ raise DownloadError("Download was corrupted")
# Update the build status on the server.
self.update_build_status(build_id, "running")
self.upload_file(file, build_id)
- except DependencyError, e:
+ except DependencyError as e:
message = "%s: %s" % (e.__class__.__name__, e)
self.update_build_status(build_id, "dependency_error", message)
raise
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
import subprocess
import time
-from _pakfire import PERSONALITY_LINUX, PERSONALITY_LINUX32
+from ._pakfire import PERSONALITY_LINUX, PERSONALITY_LINUX32
from pakfire.i18n import _
import pakfire.util as util
-from errors import *
+from .errors import *
class ShellExecuteEnvironment(object):
def __init__(self, command, cwd=None, chroot_path=None, personality=None, shell=False, timeout=0, env=None,
os.killpg(child.pid, 9)
if not nice_exit:
- raise commandTimeoutExpired, (_("Command exceeded timeout (%(timeout)d): %(command)s") % (self.timeout, self.command))
+ raise commandTimeoutExpired(_("Command exceeded timeout (%(timeout)d): %(command)s") % { "timeout" : self.timeout, "command" : self.command })
# Save exitcode.
self.exitcode = child.returncode
self.logger.debug(_("Child returncode was: %s") % self.exitcode)
if self.exitcode and self.log_errors:
- raise ShellEnvironmentError, (_("Command failed: %s") % self.command, self.exitcode)
+ raise ShellEnvironmentError(_("Command failed: %s") % self.command, self.exitcode)
return self.exitcode
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-from __future__ import division
-
import multiprocessing
import os
import socket
import tempfile
-import distro
-import shell
+from . import shell
from . import _pakfire
-from i18n import _
+from .i18n import _
class System(object):
"""
@property
def distro(self):
if not hasattr(self, "_distro"):
+ from . import distro
self._distro = distro.Distribution()
return self._distro
return ret or _("Could not be determined")
@property
- def cpu_bogomips(self):
- return _pakfire.performance_index()
+ def cpu_bogomips(self):
+ return _pakfire.performance_index()
def get_loadavg(self):
return os.getloadavg()
try:
handle, path = tempfile.mkstemp(prefix="ro-test-", dir=self.fullpath)
- except OSError, e:
+ except OSError as e:
# Read-only file system.
if e.errno == 30:
return True
shell=False,
)
shellenv.execute()
- except ShellEnvironmentError, e:
+ except ShellEnvironmentError as e:
raise OSError
if __name__ == "__main__":
- print "Hostname", system.hostname
- print "Arch", system.arch
- print "Supported arches", system.supported_arches
+ print("Hostname", system.hostname)
+ print("Arch", system.arch)
+ print("Supported arches", system.supported_arches)
- print "CPU Model", system.cpu_model
- print "CPU count", system.cpu_count
- print "Memory", system.memory
+ print("CPU Model", system.cpu_model)
+ print("CPU count", system.cpu_count)
+ print("Memory", system.memory)
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
###############################################################################
import os
-import progressbar
import sys
import time
-import i18n
-import packages
-import satsolver
-import system
-import util
-import _pakfire
+from . import _pakfire
+from . import i18n
+from . import packages
+from . import progressbar
+from . import system
+from . import util
import logging
log = logging.getLogger("pakfire")
-from constants import *
-from i18n import _
+from .constants import *
+from .i18n import _
# Import all actions directly.
-from actions import *
+from .actions import *
class TransactionCheck(object):
def __init__(self, pakfire, transaction):
def error_files(self):
ret = []
- for name, count in self.filelist.items():
+ for name, count in list(self.filelist.items()):
if count > 1:
ret.append(name)
self._steps = []
self.installsizechange = 0
- def __nonzero__(self):
+ def __bool__(self):
if self.steps:
return True
path_stat = os.statvfs(path)
if self.download_size >= path_stat.f_bavail * path_stat.f_bsize:
- raise DownloadError, _("Not enough space to download %s of packages.") \
- % util.format_size(self.download_size)
+ raise DownloadError(_("Not enough space to download %s of packages.") \
+ % util.format_size(self.download_size))
logger.info(_("Downloading packages:"))
time_start = time.time()
for action in actions:
try:
action.check(check)
- except ActionError, e:
+ except ActionError as e:
raise
if check.successful:
# and raise TransactionCheckError.
check.print_errors(logger=logger)
- raise TransactionCheckError, _("Transaction test was not successful")
+ raise TransactionCheckError(_("Transaction test was not successful"))
def verify_signatures(self, mode=None, logger=None):
"""
try:
step.pkg.verify()
- except SignatureError, e:
+ except SignatureError as e:
errors.append("%s" % e)
finally:
if p: p.finish()
# Raise a SignatureError in strict mode.
if mode == "strict":
- raise SignatureError, "\n".join(errors)
+ raise SignatureError("\n".join(errors))
elif mode == "permissive":
logger.warning(_("Found %s signature error(s)!") % len(errors))
try:
action.run()
- except ActionError, e:
+ except ActionError as e:
logger.error("Action finished with an error: %s - %s" % (action, e))
#except Exception, e:
# logger.error(_("An unforeseen error occoured: %s") % e)
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-from __future__ import division
-
import base64
import hashlib
import json
import os
import time
-import urlgrabber
-import urllib
-import urlparse
+import urllib.request, urllib.parse, urllib.error
import pakfire.downloader
import pakfire.util
import logging
log = logging.getLogger("pakfire.transport")
-
class PakfireHubTransportUploader(object):
"""
Handles the upload of a single file to the hub.
server, username, password = self.config.get_hub_credentials()
# Parse the given URL.
- url = urlparse.urlparse(server)
+ url = urllib.parse.urlparse(server)
assert url.scheme in ("http", "https")
# Build new URL.
try:
return self.grabber.urlread(url, **kwargs)
- except urlgrabber.grabber.URLGrabError, e:
+ except urlgrabber.grabber.URLGrabError as e:
# Timeout
if e.errno == 12:
- raise TransportConnectionTimeoutError, e
+ raise TransportConnectionTimeoutError(e)
# Handle common HTTP errors
elif e.errno == 14:
# Connection errors
if e.code == 5:
- raise TransportConnectionProxyError, url
+ raise TransportConnectionProxyError(url)
elif e.code == 6:
- raise TransportConnectionDNSError, url
+ raise TransportConnectionDNSError(url)
elif e.code == 7:
- raise TransportConnectionResetError, url
+ raise TransportConnectionResetError(url)
elif e.code == 23:
- raise TransportConnectionWriteError, url
+ raise TransportConnectionWriteError(url)
elif e.code == 26:
- raise TransportConnectionReadError, url
+ raise TransportConnectionReadError(url)
# SSL errors
elif e.code == 52:
- raise TransportSSLCertificateExpiredError, url
+ raise TransportSSLCertificateExpiredError(url)
# HTTP error codes
elif e.code == 403:
- raise TransportForbiddenError, url
+ raise TransportForbiddenError(url)
elif e.code == 404:
- raise TransportNotFoundError, url
+ raise TransportNotFoundError(url)
elif e.code == 500:
- raise TransportInternalServerError, url
+ raise TransportInternalServerError(url)
elif e.code in (502, 503):
- raise TransportBadGatewayError, url
+ raise TransportBadGatewayError(url)
elif e.code == 504:
- raise TransportConnectionTimeoutError, url
+ raise TransportConnectionTimeoutError(url)
# All other exceptions...
raise
return self.one_request(url, **kwargs)
# 500 - Internal Server Error, 502 + 503 Bad Gateway Error
- except (TransportInternalServerError, TransportBadGatewayError), e:
+ except (TransportInternalServerError, TransportBadGatewayError) as e:
log.exception("%s" % e.__class__.__name__)
# Wait a minute before trying again.
time.sleep(60)
# Retry on connection problems.
- except TransportConnectionError, e:
+ except TransportConnectionError as e:
log.exception("%s" % e.__class__.__name__)
# Wait for 10 seconds.
raise TransportMaxTriesExceededError
def escape_args(self, **kwargs):
- return urllib.urlencode(kwargs)
+ return urllib.parse.urlencode(kwargs)
def get(self, url, data={}, **kwargs):
"""
-#!/usr/bin/python
+#!/usr/bin/python3
###############################################################################
# #
# Pakfire - The IPFire package management system #
# #
###############################################################################
-from __future__ import division
-
import fcntl
import hashlib
import math
import os
-import progressbar
import random
import shutil
import signal
import logging
log = logging.getLogger("pakfire")
-from constants import *
-from i18n import _
+from .constants import *
+from .i18n import _
# Import binary version of version_compare and capability functions
-from _pakfire import version_compare, get_capabilities, set_capabilities, personality
+from ._pakfire import version_compare, get_capabilities, set_capabilities, personality
def cli_is_interactive():
"""
if not cli_is_interactive():
return True
- print _("%s [y/N]") % question,
- ret = raw_input()
- print # Just an empty line.
+ print(_("%s [y/N]") % question, end=' ')
+ ret = input()
+ print() # Just an empty line.
return ret in ("y", "Y", "z", "Z", "j", "J")
s = ""
for i in range(length):
- s += random.choice(string.letters)
+ s += random.choice(string.ascii_letters)
return s
def make_progress(message, maxval, eta=True, speed=False):
+ # XXX delay importing the progressbar module
+ # (because of a circular dependency)
+ from . import progressbar
+
# Return nothing if stdout is not a terminal.
if not sys.stdout.isatty():
return
tryAgain = 0
try:
shutil.rmtree(path, *args, **kargs)
- except OSError, e:
+ except OSError as e:
if e.errno == 2: # no such file or directory
pass
elif e.errno==1 or e.errno==13:
pid = int(fn, 10)
os.kill(pid, killsig)
os.waitpid(pid, 0)
- except OSError, e:
+ except OSError as e:
pass
# If something was killed, wait a couple of seconds to make sure all file descriptors
-#!/usr/bin/python
+#!/usr/bin/python3
import os
import sys
from pakfire.cli import *
from pakfire.i18n import _
-except ImportError, e:
+except ImportError as e:
# Catch ImportError and show a more user-friendly message about what
# went wrong.
# Try to load at least the i18n support, but when this fails as well we can
# go with an English error message.
_ = lambda x: x
# XXX Maybe we can make a more beautiful message here?!
- print _("There has been an error when trying to import one or more of the"
- " modules, that are required to run Pakfire.")
- print _("Please check your installation of Pakfire.")
- print
- print _("The error that lead to this:")
- print " ", e
- print
+ print(_("There has been an error when trying to import one or more of the"
+ " modules, that are required to run Pakfire."))
+ print(_("Please check your installation of Pakfire."))
+ print()
+ print(_("The error that lead to this:"))
+ print(" ", e)
+ print()
# Exit immediately.
sys.exit(1)
# Check if the program was called with a weird basename.
# If so, we exit immediately.
-if not basename2cls.has_key(basename):
+if basename not in basename2cls:
sys.exit(127)
# Return code for the shell.
ret = 1
# Catch all errors and show a user-friendly error message.
-except Error, e:
+except Error as e:
log.critical("")
log.critical(_("An error has occured when running Pakfire."))
log.error("")