Improve integer parser
@@ -44,15 +44,7 @@ class TypeLiteral(Atom):
 
 
 def IntegerParser(input):
-    return Parser.oneOf(
-        Parser.terminal(TokenType.INTEGER, lambda val, pos: IntegerLiteral.withValue(int(val), pos)),
-        Parser.allOf(
-            Parser.terminal(TokenType.MINUS),
-            Parser.terminal(TokenType.INTEGER, lambda val, pos: IntegerLiteral.withValue(int(val), pos)),
-            createNode=lambda minus, integer: IntegerLiteral.withValue(-integer.value, minus.pos),
-            name="negative integer"
-        )
-    )(input)
+    return Parser.terminal(TokenType.INTEGER, createNode=IntegerLiteral.withValue)(input)
 
 
 def StringParser(input):
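
The removed branch built negative literals inside the parser and converted the token's string value with int(val) in each createNode lambda; the replacement relies on the tokenizer already storing ints (see the mapValue hunk below). The snippet that follows is a self-contained sketch of the terminal-parser pattern used here, with stand-in Token and IntegerLiteral types rather than smnp's actual Parser API:

    from collections import namedtuple

    # Stand-ins for illustration only; the real Token, Parser and
    # IntegerLiteral classes live in smnp and are richer than this.
    Token = namedtuple("Token", ["type", "value", "pos"])
    IntegerLiteral = namedtuple("IntegerLiteral", ["value", "pos"])

    def terminal(tokenType, createNode):
        # Consume exactly one token of the given type and build a node from it.
        def parse(tokens):
            if tokens and tokens[0].type == tokenType:
                head = tokens[0]
                return (createNode(head.value, head.pos), tokens[1:])
            return (None, tokens)
        return parse

    # The token value is already an int, so no int(val) call is needed here.
    integerParser = terminal("INTEGER", IntegerLiteral)
    node, rest = integerParser([Token("INTEGER", 42, (1, 0))])
    print(node)  # IntegerLiteral(value=42, pos=(1, 0))
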
@@ -8,7 +8,7 @@ from smnp.token.tokenizers.note import noteTokenizer
 from smnp.token.tokenizers.relation import relationOperatorTokenizer
 from smnp.token.tokenizers.string import stringTokenizer
 from smnp.token.tokenizers.whitespace import whitespacesTokenizer
-from smnp.token.tools import defaultTokenizer, separated, regexPatternTokenizer
+from smnp.token.tools import defaultTokenizer, separated, regexPatternTokenizer, mapValue
 from smnp.token.type import TokenType
 
 tokenizers = (
@@ -39,7 +39,7 @@ tokenizers = (
     defaultTokenizer(TokenType.DOT),
 
     # Types
-    separated(regexPatternTokenizer(TokenType.INTEGER, r'\d')),
+    mapValue(separated(regexPatternTokenizer(TokenType.INTEGER, r'\d')), int),
     stringTokenizer,
     noteTokenizer,
     boolTokenizer,
@@ -50,3 +50,14 @@ def separated(tokenizer, end=r"\W"):
         return (0, None)
 
     return separated
+
+
+def mapValue(tokenizer, mapper):
+    def tokenize(input, current, line):
+        consumedChars, token = tokenizer(input, current, line)
+        if consumedChars > 0:
+            return (consumedChars, Token(token.type, mapper(token.value), token.pos))
+
+        return (0, None)
+
+    return tokenize
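
The added mapValue helper is a tokenizer decorator: it runs the wrapped tokenizer and, when it matched, rebuilds the token with its value passed through mapper (here int, so INTEGER tokens carry ints instead of strings). Below is a standalone sketch of the same shape, using a minimal Token stand-in and a toy digit tokenizer in place of smnp's real ones:

    import re
    from collections import namedtuple

    # Minimal stand-in for smnp's Token; only type/value/pos are modelled.
    Token = namedtuple("Token", ["type", "value", "pos"])

    def digitsTokenizer(input, current, line):
        # Toy tokenizer: consume a run of digits starting at `current`.
        match = re.match(r"\d+", input[current:])
        if match:
            text = match.group(0)
            return (len(text), Token("INTEGER", text, (line, current)))
        return (0, None)

    def mapValue(tokenizer, mapper):
        # Same shape as the helper added in this commit.
        def tokenize(input, current, line):
            consumedChars, token = tokenizer(input, current, line)
            if consumedChars > 0:
                return (consumedChars, Token(token.type, mapper(token.value), token.pos))
            return (0, None)
        return tokenize

    intTokenizer = mapValue(digitsTokenizer, int)
    print(digitsTokenizer("42+1", 0, 1))  # value stays the string "42"
    print(intTokenizer("42+1", 0, 1))     # value becomes the int 42
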