Add TypeParser (handling type lists, specifiers, etc.)
@@ -67,7 +67,7 @@ def BoolParser(input):
     return Parser.terminalParser(TokenType.BOOL, createNode=BoolLiteral.withValue)(input)
 
 
-def TypeParser(input):
+def TypeLiteralParser(input):
     return Parser.terminalParser(TokenType.TYPE, createNode=TypeLiteral.withValue)(input)
 
 
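The atom-level parser is renamed from TypeParser to TypeLiteralParser, freeing the TypeParser name for the composite parser introduced further down. For orientation, here is a minimal, hypothetical sketch of the terminal-parser pattern these one-liners rely on; none of the names below come from the repo, and the real Parser.terminalParser API may differ:

```python
# Hypothetical sketch: consume exactly one token of the expected type and
# wrap its value in an AST node (tokens modeled as (type, value) pairs).
def terminal_parser(token_type, create_node):
    def parse(tokens, i=0):
        if i < len(tokens) and tokens[i][0] == token_type:
            _, value = tokens[i]
            return create_node(value), i + 1   # success: node + next position
        return None, i                          # failure: nothing consumed
    return parse

type_literal = terminal_parser("TYPE", lambda v: ("TypeLiteral", v))
print(type_literal([("TYPE", "list")]))   # (('TypeLiteral', 'list'), 1)
```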
@@ -77,7 +77,7 @@ def LiteralParser(input):
         StringParser,
         NoteParser,
         BoolParser,
-        TypeParser,
+        TypeLiteralParser,
         name="literal"
     )(input)
 
@@ -21,7 +21,7 @@ def ExpressionParser(input):
 
     expr2 = Parser.leftAssociativeOperatorParser(
         expr1,
-        [TokenType.RELATION],
+        [TokenType.RELATION, TokenType.OPEN_ANGLE, TokenType.CLOSE_ANGLE],
         expr1,
         lambda left, op, right: Expression.withValue(BinaryOperator.withValues(left, op, right))
     )
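This compensates for the lexer change further down: `<` and `>` are no longer RELATION tokens (they now lex as OPEN_ANGLE/CLOSE_ANGLE to delimit type lists), so the expression parser must accept those token types as comparison operators too. A self-contained sketch of the left-associative folding this combinator presumably performs; all names here are illustrative, and smnp's Parser.leftAssociativeOperatorParser takes operand parsers plus a node factory instead of raw lists:

```python
# Illustrative left-associative operator folding over a flat token list.
def left_assoc(parse_operand, operator_tokens, tokens, i=0):
    left, i = parse_operand(tokens, i)
    while i < len(tokens) and tokens[i] in operator_tokens:
        op = tokens[i]
        right, i = parse_operand(tokens, i + 1)
        left = ("binop", left, op, right)     # fold each operator to the left
    return left, i

parse_int = lambda toks, i: (int(toks[i]), i + 1)
tree, _ = left_assoc(parse_int, {"<", ">"}, ["1", "<", "2", ">", "0"])
print(tree)   # ('binop', ('binop', 1, '<', 2), '>', 0)
```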
@@ -5,7 +5,79 @@
 # from smnp.token.type import TokenType
 # from smnp.type.model import Type
 #
 #
+from smnp.ast.node.atom import TypeLiteralParser
+from smnp.ast.node.iterable import abstractIterableParser
+from smnp.ast.node.model import Node
+from smnp.ast.node.none import NoneNode
+from smnp.ast.parser import Parser
+from smnp.token.type import TokenType
+
+
+class Type(Node):
+    def __init__(self, pos):
+        super().__init__(pos)
+        self.children = [NoneNode(), NoneNode()]
+
+    @property
+    def type(self):
+        return self[0]
+
+    @type.setter
+    def type(self, value):
+        self[0] = value
+
+    @property
+    def specifiers(self):
+        return self[1]
+
+    @specifiers.setter
+    def specifiers(self, value):
+        self[1] = value
+
+    @classmethod
+    def withValues(cls, pos, type, specifiers=NoneNode()):
+        node = cls(pos)
+        node.type = type
+        node.specifiers = specifiers
+        return node
+
+
+class TypesList(Node):
+    pass
+
+
+def TypesListParser(input):
+    typeListItem = Parser.oneOf(
+        TypeParser
+    )
+
+    return abstractIterableParser(
+        TypesList,
+        TokenType.OPEN_ANGLE,
+        TokenType.CLOSE_ANGLE,
+        TypeParser
+    )(input)
+
+
+class TypeSpecifiers(Node):
+    pass
+
+
+def TypeParser(input):
+    typeWithSpecifier = Parser.allOf(
+        TypeLiteralParser,
+        Parser.many(TypesListParser, createNode=TypeSpecifiers.withChildren),
+        createNode=lambda type, specifiers: Type.withValues(type.pos, type, specifiers),
+        name="type with specifiers?"
+    )
+
+    return Parser.oneOf(
+        typeWithSpecifier,
+        TypesListParser,
+        name="mult. types or type with specifier"
+    )(input)
+
+
 # class TypeSpecifier(Node):
 #
 #     @classmethod
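Judging by the import in main.py below, this hunk is smnp/ast/node/type.py. (Note that typeListItem is built but never used; abstractIterableParser is called with TypeParser directly.) The grammar it encodes appears to be: a type is either a TYPE keyword optionally followed by angle-bracketed specifier lists (e.g. `list<string>`), or a bare types list (e.g. `<list, string>`), with lists recursing into types. A self-contained sketch of that grammar, with tokens modeled as plain strings; all names here are illustrative, not repo code:

```python
# Grammar (as I read the parsers above):
#   type      := TYPE typesList*        e.g. "list<string>", bare "string"
#              | typesList              e.g. "<list, string>"
#   typesList := "<" type ("," type)* ">"
def parse_type(tokens, i=0):
    if tokens[i] == "<":                      # bare types list
        return parse_types_list(tokens, i)
    name, i = tokens[i], i + 1                # TYPE keyword
    specifiers = []
    while i < len(tokens) and tokens[i] == "<":
        spec, i = parse_types_list(tokens, i)
        specifiers.append(spec)
    return (name, specifiers), i

def parse_types_list(tokens, i):
    assert tokens[i] == "<"
    items, i = [], i + 1
    while True:
        item, i = parse_type(tokens, i)
        items.append(item)
        if tokens[i] == ",":
            i += 1
            continue
        assert tokens[i] == ">"
        return items, i + 1

print(parse_type(["<", "list", ",", "string", ">"])[0])
# [('list', []), ('string', [])]
print(parse_type(["list", "<", "string", ">"])[0])
# ('list', [[('string', [])]])
```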
@@ -1,14 +1,15 @@
 import sys
 
+from smnp.ast.node.type import TypeParser
 from smnp.error.base import SmnpException
 from smnp.program.interpreter import Interpreter
+from smnp.token.tokenizer import tokenize
 
 
 def main():
     try:
         #stdLibraryEnv = loadStandardLibrary()
-        Interpreter.interpretFile(sys.argv[1], printTokens=True, printAst=True, execute=False, baseEnvironment=None)
+        #Interpreter.interpretFile(sys.argv[1], printTokens=True, printAst=True, execute=False, baseEnvironment=None)
         #draft()
+        tokens = tokenize(['<list, string>'])
+        TypeParser(tokens).node.print()
 
     except SmnpException as e:
         print(e.message())
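main() is temporarily rewired into a smoke test: normal interpretation is commented out, and a single hard-coded line is tokenized and fed straight to TypeParser. Using only the API visible in this diff, the same check extends naturally to other inputs the grammar should accept; the extra inputs below are my assumption:

```python
from smnp.ast.node.type import TypeParser
from smnp.token.tokenizer import tokenize

# each source line should parse as one type expression under the new grammar
for src in ['<list, string>', 'list<string>', 'list<list<integer>>']:
    TypeParser(tokenize([src])).node.print()
```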
@@ -3,7 +3,7 @@ from smnp.token.type import TokenType
 from smnp.type.model import Type
 
 
-typeTokenizer = separated(keywordsTokenizer(TokenType.TYPE, *[type.name.lower() for type in Type]))
+typeTokenizer = separated(keywordsTokenizer(TokenType.TYPE, *[type.name.lower() for type in Type], mapKeyword=lambda value: Type[value.upper()]))
 
 
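With the new mapKeyword hook, TYPE tokens now carry the Type enum member itself rather than the raw lowercase keyword text. Assuming Type is a standard Enum (stand-in definition below), the round trip looks like this:

```python
from enum import Enum

class Type(Enum):          # stand-in for smnp.type.model.Type
    LIST = 1
    STRING = 2

mapKeyword = lambda value: Type[value.upper()]
print(mapKeyword("list"))   # Type.LIST
```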
@@ -3,4 +3,4 @@ from smnp.token.type import TokenType
 
 
 def relationOperatorTokenizer(input, current, line):
-    return keywordsTokenizer(TokenType.RELATION, "==", "!=", ">=", "<=", ">", "<")(input, current, line)
+    return keywordsTokenizer(TokenType.RELATION, "==", "!=", ">=", "<=")(input, current, line)
@@ -16,10 +16,10 @@ def regexPatternTokenizer(type, pattern):
     return tokenizer
 
 
-def keywordsTokenizer(type, *keywords):
+def keywordsTokenizer(type, *keywords, mapKeyword=lambda x: x):
     def tokenizer(input, current, line):
         for keyword in keywords:
-            result = keywordTokenizer(type, keyword)(input, current, line)
+            result = keywordTokenizer(type, keyword, mapKeyword)(input, current, line)
             if result[0] > 0:
                 return result
         return (0, None)
@@ -27,10 +27,10 @@ def keywordsTokenizer(type, *keywords):
     return tokenizer
 
 
-def keywordTokenizer(type, keyword):
+def keywordTokenizer(type, keyword, mapKeyword=lambda x: x):
     def tokenizer(input, current, line):
         if len(input) >= current+len(keyword) and input[current:current+len(keyword)] == keyword:
-            return (len(keyword), Token(type, keyword, (line, current)))
+            return (len(keyword), Token(type, mapKeyword(keyword), (line, current)))
         return (0, None)
     return tokenizer
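mapKeyword defaults to the identity function, so every existing keywordsTokenizer call site keeps its old behavior; only the type tokenizer opts in. A self-contained sketch of the combinator after this change, with Token as a stand-in namedtuple:

```python
from collections import namedtuple

Token = namedtuple("Token", "type value pos")   # stand-in for smnp's Token

def keywordTokenizer(type, keyword, mapKeyword=lambda x: x):
    def tokenizer(input, current, line):
        if input[current:current + len(keyword)] == keyword:
            # consume the keyword, storing its mapped value in the token
            return (len(keyword), Token(type, mapKeyword(keyword), (line, current)))
        return (0, None)                         # no match: consume nothing
    return tokenizer

consumed, tok = keywordTokenizer("TYPE", "list", str.upper)("a list", 2, 0)
print(consumed, tok)   # 4 Token(type='TYPE', value='LIST', pos=(0, 2))
```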