Add new token: TokenType.BOOL
@@ -8,7 +8,7 @@ from smnp.program.interpreter import Interpreter
 def main():
     try:
         stdLibraryEnv = loadStandardLibrary()
-        Interpreter.interpretFile(sys.argv[1], printAst=True, baseEnvironment=stdLibraryEnv)
+        Interpreter.interpretFile(sys.argv[1], printTokens=True, printAst=False, baseEnvironment=stdLibraryEnv)
 
     except SmnpException as e:
         print(e.message())
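Note: the entry point switches from dumping the AST to dumping the token stream, presumably to verify during development that "true"/"false" now arrive as BOOL tokens.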
@@ -8,7 +8,7 @@ class Token:
         self.rawValue = rawValue
 
     def __str__(self):
-        return "Token(" + str(self.type) + ", '" + str(self.value) + "', " + str(self.pos) + ")"
+        return "{" + str(self.type.name) + ", '" + str(self.value) + "', " + str(self.pos) + "}"
 
     def __repr__(self):
         return self.__str__()
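To make the new format concrete, here is a minimal self-contained sketch; the constructor signature is assumed from the fields the diff shows, not copied from the repository:

from enum import Enum

class TokenType(Enum):
    BOOL = 'bool'

class Token:
    def __init__(self, type, value, pos):
        self.type = type
        self.value = value
        self.pos = pos

    def __str__(self):
        # New format: braces and the bare enum name instead of "Token(TokenType.BOOL, ...)"
        return "{" + str(self.type.name) + ", '" + str(self.value) + "', " + str(self.pos) + "}"

print(Token(TokenType.BOOL, True, (0, 4)))  # {BOOL, 'True', (0, 4)}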
@@ -1,5 +1,6 @@
 from smnp.error.syntax import SyntaxException
 from smnp.token.model import TokenList
+from smnp.token.tokenizers.bool import boolTokenizer
 from smnp.token.tokenizers.comment import commentTokenizer
 from smnp.token.tokenizers.identifier import identifierTokenizer
 from smnp.token.tokenizers.keyword import typeTokenizer
@@ -31,8 +32,9 @@ tokenizers = (
     # Types
     separated(regexPatternTokenizer(TokenType.INTEGER, r'\d')),
     stringTokenizer,
-    typeTokenizer,
     noteTokenizer,
+    boolTokenizer,
+    typeTokenizer,
 
     # Keywords
     separated(defaultTokenizer(TokenType.FUNCTION)),
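Ordering in this tuple is significant: as combinedTokenizer further down shows, the first tokenizer to consume characters wins, so boolTokenizer has to run before the more general tokenizers or "true"/"false" would presumably be swallowed by them. A toy illustration with stand-in tokenizers, not the repository's real ones:

def boolLike(line, current, lineNumber):
    for word in ("true", "false"):
        if line.startswith(word, current):
            return (len(word), ("BOOL", word == "true"))
    return (0, None)

def identifierLike(line, current, lineNumber):
    end = current
    while end < len(line) and line[end].isalpha():
        end += 1
    return (end - current, ("IDENTIFIER", line[current:end])) if end > current else (0, None)

# First match wins, so the order of the tuple decides the token type
for order in ((boolLike, identifierLike), (identifierLike, boolLike)):
    for tokenizer in order:
        consumed, token = tokenizer("true", 0, 0)
        if consumed > 0:
            print(token)  # ('BOOL', True), then ('IDENTIFIER', 'true')
            break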
@@ -50,7 +52,6 @@ tokenizers = (
     commentTokenizer,
 )
 
-
 filters = [
     lambda token: token.type is not None,
     lambda token: token.type != TokenType.COMMENT
@@ -58,33 +59,33 @@ filters = [
 
 
 def tokenize(lines):
     tokens = []
     for lineNumber, line in enumerate(lines):
         current = 0
         while current < len(line):
             consumedChars, token = combinedTokenizer(line, current, lineNumber)
 
             if consumedChars == 0:
                 raise SyntaxException(f"Unknown symbol '{line[current]}'", (lineNumber, current))
 
             current += consumedChars
             tokens.append(token)
 
     return TokenList(filterTokens(filters, tokens), lines)
 
 
 def combinedTokenizer(line, current, lineNumber):
     for tokenizer in tokenizers:
         consumedChars, token = tokenizer(line, current, lineNumber)
         if consumedChars > 0:
             return (consumedChars, token)
     return (0, None)
 
 
 def filterTokens(filters, tokens):
     if not filters:
         return tokens
 
     return list(filterTokens(filters[1:], (token for token in tokens if filters[0](token))))
 
 
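The recursive filterTokens peels one predicate off per call and threads the survivors through a generator. A hedged sketch of the same shape, with plain tuples standing in for Token objects:

def filterTokens(filters, tokens):
    if not filters:
        return tokens
    # Apply the first filter lazily, recurse with the rest, then materialize
    return list(filterTokens(filters[1:], (t for t in tokens if filters[0](t))))

tokens = [("BOOL", True), (None, "?"), ("COMMENT", "# note")]
kept = filterTokens([lambda t: t[0] is not None, lambda t: t[0] != "COMMENT"], tokens)
print(kept)  # [('BOOL', True)]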
smnp/token/tokenizers/bool.py (new file, 11 lines)
@@ -0,0 +1,11 @@
+from smnp.token.tools import keywordsTokenizer, separated
+from smnp.token.type import TokenType
+
+
+def boolTokenizer(input, current, line):
+    consumedChars, token = separated(keywordsTokenizer(TokenType.BOOL, "true", "false"))(input, current, line)
+    if consumedChars > 0:
+        token.value = token.value == "true"
+        return (consumedChars, token)
+
+    return (0, None)
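keywordsTokenizer and separated come from smnp.token.tools and are not shown in this commit; the sketch below only approximates their presumed behavior (keyword match plus a word-boundary check) to show what boolTokenizer produces:

def boolTokenizerSketch(input, current, line):
    for word in ("true", "false"):
        end = current + len(word)
        # 'separated' presumably demands a word boundary after the keyword
        if input.startswith(word, current) and (end == len(input) or not input[end].isalnum()):
            # Mirror the real tokenizer: the raw keyword becomes a Python bool
            return (len(word), {"type": "BOOL", "value": word == "true", "pos": (line, current)})
    return (0, None)

print(boolTokenizerSketch("true and false", 0, 0))  # consumes 4 chars, value True
print(boolTokenizerSketch("truest", 0, 0))          # (0, None): not a separate word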
@@ -21,6 +21,7 @@ class TokenType(Enum):
     INTEGER = 'integer'
     STRING = 'string'
     NOTE = 'note'
+    BOOL = 'bool'
     TYPE = 'type'
     FUNCTION = 'function'
     RETURN = 'return'
@@ -13,6 +13,7 @@ class Type(Enum):
     MAP = (dict, lambda x: '{' + ', '.join(f"'{k.stringify()}' -> '{v.stringify()}'" for k, v in x.items()) + '}')
     PERCENT = (float, lambda x: f"{int(x * 100)}%")
     NOTE = (Note, lambda x: x.note.name)
+    BOOL = (bool, lambda x: str(x).lower())
     SOUND = (Sound, lambda x: x.file)
     TYPE = (None, lambda x: x.name.lower())
     VOID = (type(None), lambda x: _failStringify(Type.VOID))
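The new stringifier maps Python's True/False back to the language-level literals, since str(True) is "True". In isolation:

stringify = lambda x: str(x).lower()
print(stringify(True), stringify(False))  # true false

Together with boolTokenizer this closes the round trip: the source literal "true" becomes the Python value True at tokenization time, and stringifying it prints "true" again.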