diff --git a/bashlex/parser.py b/bashlex/parser.py
index ddf59066..d28186b3 100644
--- a/bashlex/parser.py
+++ b/bashlex/parser.py
@@ -360,6 +360,8 @@ def p_elif_clause(p):
     for i in range(1, len(p)):
         if isinstance(p[i], ast.node):
             parts.append(p[i])
+        elif isinstance(p[i], list):
+            parts.extend(p[i])
         else:
             parts.append(ast.node(kind='reservedword', word=p[i], pos=p.lexspan(i)))
     p[0] = parts
diff --git a/bashlex/tokenizer.py b/bashlex/tokenizer.py
index 46ed3980..a69aa457 100644
--- a/bashlex/tokenizer.py
+++ b/bashlex/tokenizer.py
@@ -969,11 +969,14 @@ def handledollarword():
     def _is_assignment(self, value, iscompassign):
         c = value[0]
 
-        def legalvariablechar(x):
+        def legalvariablestarter(x):
             return x.isalpha() or x == '_'
 
-        if not legalvariablechar(c):
-            return
+        def legalvariablechar(x):
+            return x.isalnum() or x == '_'
+
+        if not legalvariablestarter(c):
+            return False
 
         for i, c in enumerate(value):
             if c == '=':
diff --git a/tests/test-tokenizer.py b/tests/test-tokenizer.py
index de70f18d..9871c074 100644
--- a/tests/test-tokenizer.py
+++ b/tests/test-tokenizer.py
@@ -24,6 +24,14 @@ def assertTokens(self, s, tokens):
         for t in tokens:
             self.assertEquals(str(t.value), s[t.lexpos:t.endlexpos])
 
+    def assertNotTokens(self, s, tokens):
+        result = tokenize(s)
+
+        if result[-1].value == '\n':
+            result.pop()
+
+        self.assertNotEqual(result, tokens)
+
     def test_empty_string(self):
         self.assertEquals(len(tokenize('')), 0)
 
@@ -331,3 +339,19 @@ def test_tokenize(self):
             t(tt.WORD, 'a', [0, 1]),
             t(tt.WORD, "'b '", [2, 7], set([flags.word.QUOTED])),
             t(tt.WORD, 'c', [8, 9])])
+
+    def test_variables(self):
+        s = 'a0_=b'
+        self.assertTokens(s, [
+            t(tt.ASSIGNMENT_WORD, 'a0_=b', [0, 5],
+              flags=set([flags.word.NOSPLIT, flags.word.ASSIGNMENT]))])
+
+        s = 'a0_+=b'
+        self.assertTokens(s, [
+            t(tt.ASSIGNMENT_WORD, 'a0_+=b', [0, 6],
+              flags=set([flags.word.NOSPLIT, flags.word.ASSIGNMENT]))])
+
+        s = '0var=1'
+        self.assertTokens(s, [
+            t(tt.WORD, '0var=1', [0, 6],
+              flags=set([]))])
\ No newline at end of file
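
For context: the tokenizer hunk tightens the assignment-word check so that the first character of a candidate variable name must be a letter or underscore, while subsequent characters may also be digits; the parser.py hunk additionally handles grammar productions that yield a list of nodes by extending `parts` rather than appending the list itself. Below is a minimal standalone sketch of the name rule the new tests exercise. It mirrors the logic in the patch but does not use bashlex's internal APIs; `looks_like_assignment` is an illustrative helper, not a bashlex function.

```python
# Sketch of the variable-name rule enforced by the patch (illustrative only).
def legalvariablestarter(c):
    # first character of a name: letter or underscore
    return c.isalpha() or c == '_'

def legalvariablechar(c):
    # subsequent characters: letter, digit or underscore
    return c.isalnum() or c == '_'

def looks_like_assignment(word):
    # 'name=value' (or 'name+=value') counts as an assignment word
    # only when everything before the '=' is a legal variable name
    if '=' not in word:
        return False
    name = word.split('=', 1)[0]
    if name.endswith('+'):          # allow the append form 'name+=value'
        name = name[:-1]
    if not name or not legalvariablestarter(name[0]):
        return False
    return all(legalvariablechar(c) for c in name[1:])

assert looks_like_assignment('a0_=b')        # recognised as an assignment
assert looks_like_assignment('a0_+=b')       # append form also recognised
assert not looks_like_assignment('0var=1')   # leading digit: plain WORD
```

These three cases correspond to the `test_variables` additions: `a0_=b` and `a0_+=b` should tokenize as `ASSIGNMENT_WORD`, while `0var=1` should fall through to a plain `WORD`.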