Refactor tokenizer
This commit is contained in:
@@ -1,5 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeAssign(input, current, line):
    """Tokenizer for the '=' character, producing a TokenType.ASSIGN token."""
    assignTokenizer = charTokenizer(TokenType.ASSIGN, '=')
    return assignTokenizer(input, current, line)
|
||||
@@ -1,6 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
|
||||
def tokenizeAsterisk(input, current, line):
    """Tokenizer for the '*' character, producing a TokenType.ASTERISK token."""
    asteriskTokenizer = charTokenizer(TokenType.ASTERISK, '*')
    return asteriskTokenizer(input, current, line)
|
||||
@@ -1,10 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
|
||||
def tokenizeOpenBracket(input, current, line):
    """Tokenizer for the '{' character, producing a TokenType.OPEN_BRACKET token."""
    openBracketTokenizer = charTokenizer(TokenType.OPEN_BRACKET, '{')
    return openBracketTokenizer(input, current, line)
|
||||
|
||||
|
||||
def tokenizeCloseBracket(input, current, line):
    """Tokenizer for the '}' character, producing a TokenType.CLOSE_BRACKET token."""
    closeBracketTokenizer = charTokenizer(TokenType.CLOSE_BRACKET, '}')
    return closeBracketTokenizer(input, current, line)
|
||||
@@ -1,6 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
|
||||
def tokenizeComma(input, current, line):
    """Tokenizer for the ',' character, producing a TokenType.COMMA token."""
    commaTokenizer = charTokenizer(TokenType.COMMA, ',')
    return commaTokenizer(input, current, line)
|
||||
@@ -1,7 +1,8 @@
|
||||
from smnp.token.type import TokenType
|
||||
from smnp.token.model import Token
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeComment(input, current, line):
|
||||
|
||||
def commentTokenizer(input, current, line):
|
||||
if input[current] == '#':
|
||||
consumedChars = 0
|
||||
value = ''
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeDot(input, current, line):
    """Tokenizer for the '.' character, producing a TokenType.DOT token."""
    dotTokenizer = charTokenizer(TokenType.DOT, '.')
    return dotTokenizer(input, current, line)
|
||||
@@ -1,6 +1,6 @@
|
||||
from smnp.token.tools import regexPatternTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeIdentifier(input, current, line):
|
||||
def identifierTokenizer(input, current, line):
    """Tokenizer for identifiers: matches word characters (regex ``\\w``),
    producing a TokenType.IDENTIFIER token."""
    # TODO: disallow identifiers that begin with a digit
    wordTokenizer = regexPatternTokenizer(TokenType.IDENTIFIER, r'\w')
    return wordTokenizer(input, current, line)
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
from smnp.token.tools import regexPatternTokenizer, separate
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeInteger(input, current, line):
    """Tokenizer for integer literals: digit characters (regex ``\\d``),
    wrapped with separate(), producing a TokenType.INTEGER token."""
    digitTokenizer = separate(regexPatternTokenizer(TokenType.INTEGER, r'\d'))
    return digitTokenizer(input, current, line)
|
||||
@@ -1,34 +1,10 @@
|
||||
from smnp.token.tools import keywordsTokenizer, keywordTokenizer, separate
|
||||
from smnp.token.tools import keywordsTokenizer, separated
|
||||
from smnp.token.type import TokenType
|
||||
from smnp.type.model import Type
|
||||
|
||||
|
||||
def tokenizeType(input, current, line):
    """Tokenizer for type keywords: the lowercased names of the Type enum
    members, wrapped with separate(), producing a TokenType.TYPE token."""
    typeNames = [member.name.lower() for member in Type]
    typeTokenizer = separate(keywordsTokenizer(TokenType.TYPE, *typeNames))
    return typeTokenizer(input, current, line)
|
||||
# Tokenizer for type keywords, built from the lowercased Type enum member
# names and wrapped with separated().
typeTokenizer = separated(
    keywordsTokenizer(TokenType.TYPE, *(member.name.lower() for member in Type))
)
|
||||
|
||||
|
||||
def tokenizeReturn(input, current, line):
    """Tokenizer for the 'return' keyword, wrapped with separate(),
    producing a TokenType.RETURN token."""
    returnTokenizer = separate(keywordTokenizer(TokenType.RETURN, 'return'))
    return returnTokenizer(input, current, line)
|
||||
|
||||
|
||||
def tokenizeFunction(input, current, line):
    """Tokenizer for the 'function' keyword, wrapped with separate(),
    producing a TokenType.FUNCTION token."""
    functionTokenizer = separate(keywordTokenizer(TokenType.FUNCTION, 'function'))
    return functionTokenizer(input, current, line)
|
||||
|
||||
|
||||
def tokenizeExtend(input, current, line):
    """Tokenizer for the 'extend' keyword, wrapped with separate(),
    producing a TokenType.EXTEND token."""
    extendTokenizer = separate(keywordTokenizer(TokenType.EXTEND, "extend"))
    return extendTokenizer(input, current, line)
|
||||
|
||||
|
||||
def tokenizeImport(input, current, line):
    """Tokenizer for the 'import' keyword, wrapped with separate(),
    producing a TokenType.IMPORT token."""
    importTokenizer = separate(keywordTokenizer(TokenType.IMPORT, "import"))
    return importTokenizer(input, current, line)
|
||||
|
||||
|
||||
def tokenizeFrom(input, current, line):
    """Tokenizer for the 'from' keyword, wrapped with separate(),
    producing a TokenType.FROM token."""
    fromTokenizer = separate(keywordTokenizer(TokenType.FROM, "from"))
    return fromTokenizer(input, current, line)
|
||||
|
||||
|
||||
def tokenizeAs(input, current, line):
    """Tokenizer for the 'as' keyword, wrapped with separate(),
    producing a TokenType.AS token."""
    asTokenizer = separate(keywordTokenizer(TokenType.AS, "as"))
    return asTokenizer(input, current, line)
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeMinus(input, current, line):
    """Tokenizer for the '-' character, producing a TokenType.MINUS token."""
    minusTokenizer = charTokenizer(TokenType.MINUS, '-')
    return minusTokenizer(input, current, line)
|
||||
@@ -5,7 +5,7 @@ from smnp.token.model import Token
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
|
||||
def tokenizeNote(input, current, line):
|
||||
def noteTokenizer(input, current, line):
|
||||
consumedChars = 0
|
||||
notePitch = None
|
||||
octave = None
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
|
||||
def tokenizeOpenParen(input, current, line):
    """Tokenizer for the '(' character, producing a TokenType.OPEN_PAREN token."""
    openParenTokenizer = charTokenizer(TokenType.OPEN_PAREN, '(')
    return openParenTokenizer(input, current, line)
|
||||
|
||||
def tokenizeCloseParen(input, current, line):
    """Tokenizer for the ')' character, producing a TokenType.CLOSE_PAREN token."""
    closeParenTokenizer = charTokenizer(TokenType.CLOSE_PAREN, ')')
    return closeParenTokenizer(input, current, line)
|
||||
@@ -1,5 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizePercent(input, current, line):
    """Tokenizer for the '%' character, producing a TokenType.PERCENT token."""
    percentTokenizer = charTokenizer(TokenType.PERCENT, '%')
    return percentTokenizer(input, current, line)
|
||||
@@ -1,8 +0,0 @@
|
||||
from smnp.token.tools import charTokenizer
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeOpenSquare(input, current, line):
    """Tokenizer for the '[' character, producing a TokenType.OPEN_SQUARE token."""
    openSquareTokenizer = charTokenizer(TokenType.OPEN_SQUARE, '[')
    return openSquareTokenizer(input, current, line)
|
||||
|
||||
def tokenizeCloseSquare(input, current, line):
    """Tokenizer for the ']' character, producing a TokenType.CLOSE_SQUARE token."""
    closeSquareTokenizer = charTokenizer(TokenType.CLOSE_SQUARE, ']')
    return closeSquareTokenizer(input, current, line)
|
||||
@@ -1,13 +1,14 @@
|
||||
from smnp.token.type import TokenType
|
||||
from smnp.token.model import Token
|
||||
from smnp.token.type import TokenType
|
||||
|
||||
def tokenizeString(input, current, line):
|
||||
|
||||
def stringTokenizer(input, current, line):
|
||||
if input[current] == '"':
|
||||
value = input[current]
|
||||
char = ''
|
||||
consumedChars = 1
|
||||
while char != '"':
|
||||
if char is None: #TODO!!!
|
||||
if char is None: # TODO!!!
|
||||
print("String not terminated")
|
||||
char = input[current + consumedChars]
|
||||
value += char
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
from smnp.token.tools import regexPatternTokenizer
|
||||
|
||||
def tokenizeWhitespaces(input, current, line):
    """Tokenizer for whitespace runs (regex ``\\s``); uses a None token
    type — presumably the caller discards such matches (TODO confirm)."""
    whitespaceTokenizer = regexPatternTokenizer(None, r'\s')
    return whitespaceTokenizer(input, current, line)
|
||||
# Tokenizer for whitespace runs (regex \s); the None token type suggests the
# matches are discarded by the caller — TODO confirm against the tokenizer loop.
whitespacesTokenizer = regexPatternTokenizer(None, r'\s')
|
||||
|
||||
Reference in New Issue
Block a user