Enable tokenizer to support separators between keywords and integers

Author: Bartłomiej Pluta
Date: 2019-07-06 13:35:21 +02:00
parent 675b1774fe
commit 9c4046ac2a

14 changed files with 90 additions and 57 deletions


@@ -1,5 +1,5 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeAssign(input, current, line):
-    return tokenizeChar(TokenType.ASSIGN, '=', input, current, line)
+    return charTokenizer(TokenType.ASSIGN, '=')(input, current, line)
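
Note: smnp/token/tools.py itself is not part of this commit, so the new charTokenizer factory is not visible here. Judging from the call-shape change (configuration arguments split off from the (input, current, line) arguments), it presumably builds and returns a tokenizer closure. A minimal sketch under that assumption; the Token stand-in and the (consumedChars, token) return convention are guesses for illustration, not code from this repository:

from collections import namedtuple

# Stand-in for the real smnp Token model (shape assumed for illustration).
Token = namedtuple('Token', ['type', 'value', 'line'])

# Hypothetical sketch: a tokenizer maps (input, current, line) to
# (consumedChars, token), with consumedChars == 0 meaning "no match".
def charTokenizer(tokenType, char):
    def tokenizer(input, current, line):
        if current < len(input) and input[current] == char:
            return (1, Token(tokenType, char, line))
        return (0, None)
    return tokenizer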


@@ -1,5 +1,6 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeAsterisk(input, current, line):
-    return tokenizeChar(TokenType.ASTERISK, '*', input, current, line)
+    return charTokenizer(TokenType.ASTERISK, '*')(input, current, line)


@@ -1,8 +1,10 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeOpenBracket(input, current, line):
-    return tokenizeChar(TokenType.OPEN_BRACKET, '{', input, current, line)
+    return charTokenizer(TokenType.OPEN_BRACKET, '{')(input, current, line)
 
 def tokenizeCloseBracket(input, current, line):
-    return tokenizeChar(TokenType.CLOSE_BRACKET, '}', input, current, line)
+    return charTokenizer(TokenType.CLOSE_BRACKET, '}')(input, current, line)


@@ -1,5 +1,6 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeComma(input, current, line):
-    return tokenizeChar(TokenType.COMMA, ',', input, current, line)
+    return charTokenizer(TokenType.COMMA, ',')(input, current, line)


@@ -1,5 +1,5 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeDot(input, current, line):
-    return tokenizeChar(TokenType.DOT, '.', input, current, line)
+    return charTokenizer(TokenType.DOT, '.')(input, current, line)


@@ -1,5 +1,6 @@
-from smnp.token.tools import tokenizeRegexPattern
+from smnp.token.tools import regexPatternTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeIdentifier(input, current, line):
-    return tokenizeRegexPattern(TokenType.IDENTIFIER, r'\w', input, current, line)
+    # TODO: Disallow identifiers beginning with a number
+    return regexPatternTokenizer(TokenType.IDENTIFIER, r'\w')(input, current, line)
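
regexPatternTokenizer gets the same factory treatment. A plausible reading is that it consumes the longest run of characters matching a one-character pattern (r'\w' here, r'\d' for integers). A hedged sketch, reusing the Token stand-in and (consumedChars, token) convention assumed above:

import re

# Hypothetical sketch: consume a maximal run of characters matching
# a single-character pattern such as r'\w' or r'\d'.
def regexPatternTokenizer(tokenType, pattern):
    def tokenizer(input, current, line):
        pos = current
        while pos < len(input) and re.match(pattern, input[pos]):
            pos += 1
        if pos > current:
            return (pos - current, Token(tokenType, input[current:pos], line))
        return (0, None)
    return tokenizer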


@@ -1,5 +1,5 @@
-from smnp.token.tools import tokenizeRegexPattern
+from smnp.token.tools import regexPatternTokenizer, separate
 from smnp.token.type import TokenType
 
 def tokenizeInteger(input, current, line):
-    return tokenizeRegexPattern(TokenType.INTEGER, r'\d', input, current, line)
+    return separate(regexPatternTokenizer(TokenType.INTEGER, r'\d'))(input, current, line)
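
separate is the combinator the commit title refers to. Presumably it accepts the wrapped tokenizer's match only when the following character is a separator, so an input like 123abc is rejected as an integer instead of lexing as INTEGER 123 followed by IDENTIFIER abc. A sketch under that assumption, treating any non-word character, or end of input, as a separator:

import re

# Hypothetical sketch of separate(): the wrapped match only counts if the
# next character is a non-word character or the input has ended.
def separate(tokenizer):
    def wrapped(input, current, line):
        consumed, token = tokenizer(input, current, line)
        end = current + consumed
        if consumed > 0 and (end >= len(input) or re.match(r'\W', input[end])):
            return (consumed, token)
        return (0, None)  # no separator -> report no match
    return wrapped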


@@ -1,34 +1,34 @@
-from smnp.token.tools import tokenizeKeywords, tokenizeKeyword
+from smnp.token.tools import keywordsTokenizer, keywordTokenizer, separate
 from smnp.token.type import TokenType
 from smnp.type.model import Type
 
 def tokenizeType(input, current, line):
     types = [ type.name.lower() for type in Type ]
-    return tokenizeKeywords(TokenType.TYPE, input, current, line, *types)
+    return separate(keywordsTokenizer(TokenType.TYPE, *types))(input, current, line)
 
 def tokenizeReturn(input, current, line):
-    return tokenizeKeyword(TokenType.RETURN, 'return', input, current, line)
+    return separate(keywordTokenizer(TokenType.RETURN, 'return'))(input, current, line)
 
 def tokenizeFunction(input, current, line):
-    return tokenizeKeyword(TokenType.FUNCTION, 'function', input, current, line)
+    return separate(keywordTokenizer(TokenType.FUNCTION, 'function'))(input, current, line)
 
 def tokenizeExtend(input, current, line):
-    return tokenizeKeyword(TokenType.EXTEND, "extend", input, current, line)
+    return separate(keywordTokenizer(TokenType.EXTEND, "extend"))(input, current, line)
 
 def tokenizeImport(input, current, line):
-    return tokenizeKeyword(TokenType.IMPORT, "import", input, current, line)
+    return separate(keywordTokenizer(TokenType.IMPORT, "import"))(input, current, line)
 
 def tokenizeFrom(input, current, line):
-    return tokenizeKeyword(TokenType.FROM, "from", input, current, line)
+    return separate(keywordTokenizer(TokenType.FROM, "from"))(input, current, line)
 
 def tokenizeAs(input, current, line):
-    return tokenizeKeyword(TokenType.AS, "as", input, current, line)
+    return separate(keywordTokenizer(TokenType.AS, "as"))(input, current, line)
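
The keyword helpers follow the same pattern, and every keyword tokenizer is now wrapped in separate, so e.g. 'returned' no longer lexes as the keyword return followed by the identifier ed. Hypothetical sketches of the two factories, under the same assumed conventions as above:

# Hypothetical sketch: match one literal keyword at the current position.
def keywordTokenizer(tokenType, keyword):
    def tokenizer(input, current, line):
        if input.startswith(keyword, current):
            return (len(keyword), Token(tokenType, keyword, line))
        return (0, None)
    return tokenizer

# Hypothetical sketch: try each keyword in turn; first match wins.
def keywordsTokenizer(tokenType, *keywords):
    def tokenizer(input, current, line):
        for keyword in keywords:
            consumed, token = keywordTokenizer(tokenType, keyword)(input, current, line)
            if consumed > 0:
                return (consumed, token)
        return (0, None)
    return tokenizer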


@@ -1,5 +1,5 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeMinus(input, current, line):
-    return tokenizeChar(TokenType.MINUS, '-', input, current, line)
+    return charTokenizer(TokenType.MINUS, '-')(input, current, line)


@@ -1,8 +1,9 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeOpenParen(input, current, line):
-    return tokenizeChar(TokenType.OPEN_PAREN, '(', input, current, line)
+    return charTokenizer(TokenType.OPEN_PAREN, '(')(input, current, line)
 
 def tokenizeCloseParen(input, current, line):
-    return tokenizeChar(TokenType.CLOSE_PAREN, ')', input, current, line)
+    return charTokenizer(TokenType.CLOSE_PAREN, ')')(input, current, line)


@@ -1,5 +1,5 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizePercent(input, current, line):
-    return tokenizeChar(TokenType.PERCENT, '%', input, current, line)
+    return charTokenizer(TokenType.PERCENT, '%')(input, current, line)


@@ -1,8 +1,8 @@
-from smnp.token.tools import tokenizeChar
+from smnp.token.tools import charTokenizer
 from smnp.token.type import TokenType
 
 def tokenizeOpenSquare(input, current, line):
-    return tokenizeChar(TokenType.OPEN_SQUARE, '[', input, current, line)
+    return charTokenizer(TokenType.OPEN_SQUARE, '[')(input, current, line)
 
 def tokenizeCloseSquare(input, current, line):
-    return tokenizeChar(TokenType.CLOSE_SQUARE, ']', input, current, line)
+    return charTokenizer(TokenType.CLOSE_SQUARE, ']')(input, current, line)


@@ -1,4 +1,4 @@
-from smnp.token.tools import tokenizeRegexPattern
+from smnp.token.tools import regexPatternTokenizer
 
 def tokenizeWhitespaces(input, current, line):
-    return tokenizeRegexPattern(None, r'\s', input, current, line)
+    return regexPatternTokenizer(None, r'\s')(input, current, line)