scripts: fix tokenizing of macro parameters in API builder
author    Daniel P. Berrangé <berrange@redhat.com>
          Tue, 19 May 2020 11:27:15 +0000 (12:27 +0100)
committer Daniel P. Berrangé <berrange@redhat.com>
          Fri, 22 May 2020 10:32:55 +0000 (11:32 +0100)
The API build script tokenizes preprocessor definitions by first splitting
on whitespace. This is unhelpful, as it means a function-like macro such as

 # define VIR_USE_CPU(cpumap, cpu) ((cpumap)[(cpu) / 8] |= (1 << ((cpu) % 8)))

gets tokenized as:

  #define
  VIR_USE_CPU(cpumap,
  cpu)
  ((cpumap)[(cpu)
  /
  8]
  |=
  (1
  <<
  ((cpu)
  %
  8)))
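
For reference, plain whitespace splitting of the line reproduces this
fragmentation (an illustration only, not the actual lexer; the real CLexer
additionally joins the leading "#" with "define" into a single "#define"
token, as the context lines in the diff below show):

  line = '# define VIR_USE_CPU(cpumap, cpu) ((cpumap)[(cpu) / 8] |= (1 << ((cpu) % 8)))'
  print(line.split())
  # ['#', 'define', 'VIR_USE_CPU(cpumap,', 'cpu)', '((cpumap)[(cpu)',
  #  '/', '8]', '|=', '(1', '<<', '((cpu)', '%', '8)))']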

With this change, the parameters are all merged into the token that holds
the macro name:

  #define
  VIR_USE_CPU(cpumap,cpu)
  ((cpumap)[(cpu)
  /
  8]
  |=
  (1
  <<
  ((cpu)
  %
  8)))

which is more convenient to process later on in the script.
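
The merging pass can be sketched in isolation roughly like this (a
simplified standalone sketch, not the actual CLexer change in the diff
below; the join_macro_params name and the (kind, text) token tuples are
illustrative, and unlike the real code it does not split off any text
trailing the closing ")"):

  def join_macro_params(tokens):
      # tokens is a list of (kind, text) tuples, e.g. ('preproc', '#define').
      # Only function-like macros need merging.
      if len(tokens) < 2 or tokens[0][1] != "#define" or "(" not in tokens[1][1]:
          return tokens

      merged = [tokens[0]]
      name = tokens[1][1]
      rest = list(tokens[2:])
      # Concatenate tokens until the parameter list's closing ")" is seen.
      while ")" not in name and rest:
          name += rest.pop(0)[1]
      merged.append(('preproc', name))
      merged.extend(rest)
      return merged

  toks = [('preproc', '#define'), ('preproc', 'VIR_USE_CPU(cpumap,'),
          ('preproc', 'cpu)'), ('preproc', '((cpumap)[(cpu)')]
  print(join_macro_params(toks))
  # [('preproc', '#define'), ('preproc', 'VIR_USE_CPU(cpumap,cpu)'),
  #  ('preproc', '((cpumap)[(cpu)')]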

Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
scripts/apibuild.py

index b13b5db64410f6d48153eb5ae3069d2ff6c4bf35..68c588d8b67d9fb7d377684dffa3554c798618d6 100755 (executable)
@@ -494,6 +494,28 @@ class CLexer:
                 if self.tokens[0][1] == "#":
                     self.tokens[0] = ('preproc', "#" + self.tokens[1][1])
                     del self.tokens[1]
+
+                if self.tokens[0][1] == "#define" and "(" in self.tokens[1][1]:
+                    newtokens = [self.tokens[0]]
+
+                    endArg = self.tokens[1][1].find(")")
+                    if endArg != -1:
+                        extra = self.tokens[1][1][endArg+1:]
+                        name = self.tokens[1][1][0:endArg+1]
+                        newtokens.append(('preproc', name))
+                        if extra != "":
+                            newtokens.append(('preproc', extra))
+                    else:
+                        name = self.tokens[1][1]
+                        for token in self.tokens[2:]:
+                            if name is not None:
+                                name = name + token[1]
+                                if ")" in token[1]:
+                                    newtokens.append(('preproc', name))
+                                    name = None
+                            else:
+                                newtokens.append(token)
+                    self.tokens = newtokens
                 break
             nline = len(line)
             if line[0] == '"' or line[0] == "'":