Create tokenizer for float type
This commit is contained in:
@@ -2,6 +2,7 @@ from smnp.error.syntax import SyntaxException
|
|||||||
from smnp.token.model import TokenList
|
from smnp.token.model import TokenList
|
||||||
from smnp.token.tokenizers.bool import boolTokenizer
|
from smnp.token.tokenizers.bool import boolTokenizer
|
||||||
from smnp.token.tokenizers.comment import commentTokenizer
|
from smnp.token.tokenizers.comment import commentTokenizer
|
||||||
|
from smnp.token.tokenizers.float import floatTokenizer
|
||||||
from smnp.token.tokenizers.identifier import identifierTokenizer
|
from smnp.token.tokenizers.identifier import identifierTokenizer
|
||||||
from smnp.token.tokenizers.keyword import typeTokenizer
|
from smnp.token.tokenizers.keyword import typeTokenizer
|
||||||
from smnp.token.tokenizers.note import noteTokenizer
|
from smnp.token.tokenizers.note import noteTokenizer
|
||||||
@@ -41,6 +42,7 @@ tokenizers = (
|
|||||||
defaultTokenizer(TokenType.DOT),
|
defaultTokenizer(TokenType.DOT),
|
||||||
|
|
||||||
# Types
|
# Types
|
||||||
|
separated(floatTokenizer),
|
||||||
mapValue(separated(regexPatternTokenizer(TokenType.INTEGER, r'\d')), int),
|
mapValue(separated(regexPatternTokenizer(TokenType.INTEGER, r'\d')), int),
|
||||||
stringTokenizer,
|
stringTokenizer,
|
||||||
noteTokenizer,
|
noteTokenizer,
|
||||||
|
|||||||
25
smnp/token/tokenizers/float.py
Normal file
25
smnp/token/tokenizers/float.py
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
from smnp.token.model import Token
|
||||||
|
from smnp.token.tools import regexPatternTokenizer, keywordTokenizer
|
||||||
|
from smnp.token.type import TokenType
|
||||||
|
|
||||||
|
|
||||||
|
def floatTokenizer(input, current, line):
    """Tokenize a float literal of the form <digits>.<digits>.

    Consumes, in order: an integer part, a decimal dot, and a fractional
    part. All three components must be present — a bare integer or a
    trailing dot does not produce a FLOAT token.

    Args:
        input: the raw source text being tokenized.
        current: index into `input` where tokenization starts.
        line: current line number, stored in the token's position.

    Returns:
        (consumedChars, Token) with a TokenType.FLOAT token whose value is
        the parsed float, on success; (0, None) when the text at `current`
        is not a float literal.
    """
    consumedChars = 0
    value = ""

    # Integer part (token.value is the matched digit string, not an int —
    # the int conversion happens later via mapValue in the tokenizer table).
    consumed, token = regexPatternTokenizer(TokenType.INTEGER, r'\d')(input, current, line)
    if consumed > 0:
        consumedChars += consumed
        value += token.value

        # Decimal point
        consumed, token = keywordTokenizer(TokenType.DOT, ".")(input, current + consumedChars, line)
        if consumed > 0:
            consumedChars += consumed
            value += token.value

            # Fractional part
            consumed, token = regexPatternTokenizer(TokenType.INTEGER, r'\d')(input, current + consumedChars, line)
            if consumed > 0:
                consumedChars += consumed
                value += token.value
                # FIX: removed leftover debug `print(value)` that fired on
                # every successfully tokenized float literal.
                return (consumedChars, Token(TokenType.FLOAT, float(value), (current, line), value))

    return (0, None)
|
||||||
@@ -30,6 +30,7 @@ class TokenType(Enum):
|
|||||||
NOT = 'not'
|
NOT = 'not'
|
||||||
INTEGER = 'integer'
|
INTEGER = 'integer'
|
||||||
STRING = 'string'
|
STRING = 'string'
|
||||||
|
FLOAT = 'float'
|
||||||
NOTE = 'note'
|
NOTE = 'note'
|
||||||
BOOL = 'bool'
|
BOOL = 'bool'
|
||||||
TYPE = 'type'
|
TYPE = 'type'
|
||||||
|
|||||||
Reference in New Issue
Block a user