codeparser.py: support deeply nested tokens
author    Patrick Ohly <patrick.ohly@intel.com>
Fri, 18 Nov 2016 15:23:22 +0000 (16:23 +0100)
committer Richard Purdie <richard.purdie@linuxfoundation.org>
Wed, 30 Nov 2016 15:47:46 +0000 (15:47 +0000)
For shell constructs like
   echo hello & wait $!
the process_tokens() method could end up with a "token" that was a
list of tuples rather than the expected (name, value) tuple, causing
the "name, value = token" assignment to fail with a ValueError.
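
A minimal sketch of the failure (the token shapes are illustrative,
not the exact pyshyacc output):

    # Hypothetical shapes: a flat (name, value) tuple next to a
    # nested list of tuples, roughly what "echo hello & wait $!"
    # produced.
    tokens = [
        ("simple_command", "echo hello"),
        [("simple_command", "wait $!")],
    ]

    for token in tokens:
        name, value = token   # ValueError on the nested list entry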

There were already two for loops (one in _parse_shell(), one in
process_tokens()) that iterated over token lists, but apparently the
actual nesting can go even deeper than that.

Now there is just one such loop in process_token_list() which calls
itself recursively when it detects that a list entry is another list.
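
In isolation, a minimal sketch of the same recursive pattern, with
handle() standing in for the token_handlers dispatch of the real
code:

    def process_token_list(tokens, handle):
        for token in tokens:
            if isinstance(token, list):
                # A list entry is another token list: recurse into it.
                process_token_list(token, handle)
                continue
            name, value = token
            handle(name, value)

    # Arbitrary nesting depth gets flattened by the recursion:
    process_token_list(
        [("a", 1), [("b", 2), [("c", 3)]]],
        lambda name, value: print(name, value),
    )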

As a side effect (arguably an improvement) of removing the loop in
_parse_shell(), process_tokens() is now invoked there once for the
whole token list instead of once per top-level token, so its local
function definitions get executed less often.
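
That matters because a def statement is executed anew on each call of
its enclosing function, as this standalone illustration (not from the
patch) shows:

    def outer():
        def inner():   # a new function object is built per outer() call
            pass
        return inner

    # Two calls, two distinct objects; calling process_tokens() fewer
    # times therefore saves the repeated rebuilds.
    assert outer() is not outer()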

Fixes: [YOCTO #10668]
Signed-off-by: Patrick Ohly <patrick.ohly@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>

diff --git a/lib/bb/codeparser.py b/lib/bb/codeparser.py
index 25938d6586eff8900bea91e473cb658f73fadff7..5d2d44065ac584547865f6f1b9dde075396e85d1 100644
--- a/lib/bb/codeparser.py
+++ b/lib/bb/codeparser.py
@@ -342,8 +342,7 @@ class ShellParser():
         except pyshlex.NeedMore:
             raise sherrors.ShellSyntaxError("Unexpected EOF")
 
-        for token in tokens:
-            self.process_tokens(token)
+        self.process_tokens(tokens)
 
     def process_tokens(self, tokens):
         """Process a supplied portion of the syntax tree as returned by
@@ -389,18 +388,24 @@ class ShellParser():
             "case_clause": case_clause,
         }
 
-        for token in tokens:
-            name, value = token
-            try:
-                more_tokens, words = token_handlers[name](value)
-            except KeyError:
-                raise NotImplementedError("Unsupported token type " + name)
+        def process_token_list(tokens):
+            for token in tokens:
+                if isinstance(token, list):
+                    process_token_list(token)
+                    continue
+                name, value = token
+                try:
+                    more_tokens, words = token_handlers[name](value)
+                except KeyError:
+                    raise NotImplementedError("Unsupported token type " + name)
+
+                if more_tokens:
+                    self.process_tokens(more_tokens)
 
-            if more_tokens:
-                self.process_tokens(more_tokens)
+                if words:
+                    self.process_words(words)
 
-            if words:
-                self.process_words(words)
+        process_token_list(tokens)
 
     def process_words(self, words):
         """Process a set of 'words' in pyshyacc parlance, which includes