Refactor tokenizer
0 smnp/token/tokenizers/__init__.py Normal file
5 smnp/token/tokenizers/assign.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeAssign(input, current, line):
    return tokenizeChar(TokenType.ASSIGN, '=', input, current, line)
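All of the single-character tokenizers in this commit (assign, asterisk, colon, comma, dot, minus, percent, brackets, parens) delegate to tokenizeChar from smnp.token.tools, which is not part of this diff. A minimal sketch of what such a helper presumably looks like, given the (consumedChars, token) contract every tokenizer here follows:

# Hypothetical sketch -- smnp/token/tools.py is not included in this commit.
from smnp.token.model import Token

def tokenizeChar(tokenType, char, input, current, line):
    # Consume exactly one character when it matches, else consume nothing
    if input[current] == char:
        return (1, Token(tokenType, char, (line, current)))
    return (0, None)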
5 smnp/token/tokenizers/asterisk.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeAsterisk(input, current, line):
    return tokenizeChar(TokenType.ASTERISK, '*', input, current, line)
8 smnp/token/tokenizers/bracket.py Normal file
@@ -0,0 +1,8 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeOpenBracket(input, current, line):
    return tokenizeChar(TokenType.OPEN_BRACKET, '{', input, current, line)

def tokenizeCloseBracket(input, current, line):
    return tokenizeChar(TokenType.CLOSE_BRACKET, '}', input, current, line)
5 smnp/token/tokenizers/colon.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeColon(input, current, line):
    return tokenizeChar(TokenType.COLON, ':', input, current, line)
5 smnp/token/tokenizers/comma.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeComma(input, current, line):
    return tokenizeChar(TokenType.COMMA, ',', input, current, line)
13 smnp/token/tokenizers/comment.py Normal file
@@ -0,0 +1,13 @@
from smnp.token.type import TokenType
from smnp.token.model import Token

def tokenizeComment(input, current, line):
    if input[current] == '#':
        consumedChars = 0
        value = ''
        # A comment runs to the end of the input line
        while current+consumedChars < len(input):
            value += input[current+consumedChars]
            consumedChars += 1
        return (consumedChars, Token(TokenType.COMMENT, value, (line, current)))
    return (0, None)
5 smnp/token/tokenizers/dot.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeDot(input, current, line):
    return tokenizeChar(TokenType.DOT, '.', input, current, line)
5 smnp/token/tokenizers/function.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeKeyword
from smnp.token.type import TokenType

def tokenizeFunction(input, current, line):
    return tokenizeKeyword(TokenType.FUNCTION, 'function', input, current, line)
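tokenizeKeyword is likewise imported from smnp.token.tools and not shown in this diff. A plausible sketch, assuming it must avoid matching a keyword that is merely the prefix of a longer identifier:

# Hypothetical sketch -- the real tokenizeKeyword lives in smnp.token.tools.
import re
from smnp.token.model import Token

def tokenizeKeyword(tokenType, keyword, input, current, line):
    end = current + len(keyword)
    # Reject matches like 'function' inside 'functional'
    if input[current:end] == keyword and (end >= len(input) or not re.match(r'\w', input[end])):
        return (len(keyword), Token(tokenType, keyword, (line, current)))
    return (0, None)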
5 smnp/token/tokenizers/identifier.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeRegexPattern
from smnp.token.type import TokenType

def tokenizeIdentifier(input, current, line):
    return tokenizeRegexPattern(TokenType.IDENTIFIER, r'\w', input, current, line)
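tokenizeRegexPattern is the third helper assumed from smnp.token.tools. Note that r'\w' also matches digits, so the dispatcher presumably has to try tokenizeInteger before tokenizeIdentifier. A sketch under the same (consumedChars, token) convention, where a tokenType of None (see whitespace.py below) consumes input without emitting a token:

# Hypothetical sketch -- the real tokenizeRegexPattern lives in smnp.token.tools.
import re
from smnp.token.model import Token

def tokenizeRegexPattern(tokenType, pattern, input, current, line):
    # Greedily consume consecutive characters matching the one-char pattern
    consumedChars = 0
    value = ''
    while current + consumedChars < len(input) and re.match(pattern, input[current + consumedChars]):
        value += input[current + consumedChars]
        consumedChars += 1
    if consumedChars == 0:
        return (0, None)
    token = Token(tokenType, value, (line, current)) if tokenType is not None else None
    return (consumedChars, token)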
5 smnp/token/tokenizers/integer.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeRegexPattern
from smnp.token.type import TokenType

def tokenizeInteger(input, current, line):
    return tokenizeRegexPattern(TokenType.INTEGER, r'\d', input, current, line)
5 smnp/token/tokenizers/minus.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeMinus(input, current, line):
    return tokenizeChar(TokenType.MINUS, '-', input, current, line)
42 smnp/token/tokenizers/note.py Normal file
@@ -0,0 +1,42 @@
import re
from smnp.token.type import TokenType
from smnp.token.model import Token

def tokenizeNote(input, current, line):
    consumedChars = 0
    value = ''
    if input[current] == '@':
        consumedChars += 1
        value += input[current]
        # Note letter (both B and H spellings are accepted)
        if current+consumedChars < len(input) and input[current+consumedChars] in ('C', 'c', 'D', 'd', 'E', 'e', 'F', 'f', 'G', 'g', 'A', 'a', 'H', 'h', 'B', 'b'):
            value += input[current+consumedChars]
            consumedChars += 1

            # Optional accidental
            if current+consumedChars < len(input) and input[current+consumedChars] in ('b', '#'):
                value += input[current+consumedChars]
                consumedChars += 1

            # Optional octave digit
            if current+consumedChars < len(input) and re.match(r'\d', input[current+consumedChars]):
                value += input[current+consumedChars]
                consumedChars += 1

            # Optional duration: '.' followed by digits, optionally ending with 'd' (dotted)
            if current+consumedChars < len(input) and input[current+consumedChars] == '.':
                duration = input[current+consumedChars]
                consumedChars += 1
                while current+consumedChars < len(input) and re.match(r'\d', input[current+consumedChars]):
                    duration += input[current+consumedChars]
                    consumedChars += 1
                if current+consumedChars < len(input) and input[current+consumedChars] == 'd':
                    duration += input[current+consumedChars]
                    consumedChars += 1
                if len(duration) > 1:
                    value += duration
                else:
                    # A bare '.' is not a duration; give the character back
                    consumedChars -= 1
            return (consumedChars, Token(TokenType.NOTE, value, (line, current)))
    return (0, None)
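To make the note grammar concrete, these calls illustrate what tokenizeNote accepts (the token reprs in the comments are schematic; the exact formatting depends on smnp.token.model):

tokenizeNote('@C', 0, 1)          # -> (2, Token(NOTE, '@C', (1, 0)))
tokenizeNote('@G#4.16d x', 0, 1)  # -> (8, Token(NOTE, '@G#4.16d', (1, 0)))
tokenizeNote('@.', 0, 1)          # -> (0, None): '@' must be followed by a note letter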
8 smnp/token/tokenizers/paren.py Normal file
@@ -0,0 +1,8 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizeOpenParen(input, current, line):
    return tokenizeChar(TokenType.OPEN_PAREN, '(', input, current, line)

def tokenizeCloseParen(input, current, line):
    return tokenizeChar(TokenType.CLOSE_PAREN, ')', input, current, line)
5 smnp/token/tokenizers/percent.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeChar
from smnp.token.type import TokenType

def tokenizePercent(input, current, line):
    return tokenizeChar(TokenType.PERCENT, '%', input, current, line)
5 smnp/token/tokenizers/ret.py Normal file
@@ -0,0 +1,5 @@
from smnp.token.tools import tokenizeKeyword
from smnp.token.type import TokenType

def tokenizeReturn(input, current, line):
    return tokenizeKeyword(TokenType.RETURN, 'return', input, current, line)
17 smnp/token/tokenizers/string.py Normal file
@@ -0,0 +1,17 @@
from smnp.token.type import TokenType
from smnp.token.model import Token

def tokenizeString(input, current, line):
    if input[current] == '"':
        value = input[current]
        char = ''
        consumedChars = 1
        while char != '"':
            if current + consumedChars >= len(input):  # TODO: raise a proper lexer error
                print("String not terminated")
                return (0, None)
            char = input[current + consumedChars]
            value += char
            consumedChars += 1
        return (consumedChars, Token(TokenType.STRING, value, (line, current)))
    return (0, None)
4 smnp/token/tokenizers/whitespace.py Normal file
@@ -0,0 +1,4 @@
from smnp.token.tools import tokenizeRegexPattern

def tokenizeWhitespaces(input, current, line):
    return tokenizeRegexPattern(None, r'\s', input, current, line)
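Every tokenizer added here shares the signature (input, current, line) -> (consumedChars, token), which suggests a driver that tries each one in turn at the current position. The dispatcher itself is not part of this commit; a hypothetical sketch:

# Hypothetical driver loop -- the actual dispatcher is elsewhere in smnp.token.
from smnp.token.tokenizers.assign import tokenizeAssign
from smnp.token.tokenizers.comment import tokenizeComment
from smnp.token.tokenizers.note import tokenizeNote
from smnp.token.tokenizers.string import tokenizeString
from smnp.token.tokenizers.whitespace import tokenizeWhitespaces

# ...plus the remaining tokenizers added in this commit
TOKENIZERS = [tokenizeComment, tokenizeString, tokenizeNote,
              tokenizeWhitespaces, tokenizeAssign]

def tokenizeLine(input, line):
    tokens = []
    current = 0
    while current < len(input):
        for tokenizer in TOKENIZERS:
            consumedChars, token = tokenizer(input, current, line)
            if consumedChars > 0:
                if token is not None:  # whitespace consumes input but emits no token
                    tokens.append(token)
                current += consumedChars
                break
        else:
            raise ValueError("Unexpected character %r at (%d, %d)" % (input[current], line, current))
    return tokens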