summaryrefslogtreecommitdiff
path: root/dice-lang/src/bjc/dicelang/Tokenizer.java
diff options
context:
space:
mode:
Diffstat (limited to 'dice-lang/src/bjc/dicelang/Tokenizer.java')
-rw-r--r--  dice-lang/src/bjc/dicelang/Tokenizer.java  126
1 file changed, 63 insertions, 63 deletions
diff --git a/dice-lang/src/bjc/dicelang/Tokenizer.java b/dice-lang/src/bjc/dicelang/Tokenizer.java
index a4b72b9..b73c0a5 100644
--- a/dice-lang/src/bjc/dicelang/Tokenizer.java
+++ b/dice-lang/src/bjc/dicelang/Tokenizer.java
@@ -12,57 +12,57 @@ import java.util.regex.Pattern;
import static bjc.dicelang.Errors.ErrorKey.*;
import static bjc.dicelang.Token.Type.*;
-
// Lexes raw token strings into Token objects for the dice language.
// (Diff view: '-' lines are the old revision, '+' lines the new one.)
public class Tokenizer {
// Literal tokens for tokenization
private IMap<String, Token.Type> litTokens;
-
+
// The engine this tokenizer reports to (used to register string literals).
private DiceLangEngine eng;
// Next fresh symbol number — presumably for generated symbols; not used
// in the visible code, TODO confirm against the rest of the file.
private int nextSym = 0;
-
+
/**
 * Create a tokenizer bound to the given engine.
 *
 * Builds the table of fixed literal tokens (operators, dice
 * constructors, binding forms and separators) that lexToken
 * consults before falling back to literal parsing.
 *
 * @param engine The engine on whose behalf tokens are lexed.
 */
public Tokenizer(DiceLangEngine engine) {
	eng = engine;

	litTokens = new FunctionalMap<>();

	// Arithmetic operators.
	litTokens.put("+", ADD);
	litTokens.put("-", SUBTRACT);
	litTokens.put("*", MULTIPLY);
	litTokens.put("/", DIVIDE);
	litTokens.put("//", IDIVIDE);

	// Dice constructors.
	litTokens.put("sd", DICESCALAR);
	litTokens.put("df", DICEFUDGE);
	litTokens.put("dg", DICEGROUP);
	litTokens.put("dc", DICECONCAT);
	litTokens.put("dl", DICELIST);

	// Binding forms.
	litTokens.put("=>", LET);
	litTokens.put(":=", BIND);

	// String operators.
	litTokens.put(".+.", STRCAT);
	litTokens.put(".*.", STRREP);

	// Miscellaneous.
	litTokens.put(",", GROUPSEP);
	litTokens.put("crc", COERCE);
}
-
+
// Lexes a single raw token string into a Token.
// Fixed operator tokens are resolved through the litTokens table;
// strings starting with a grouping character are delegated to
// tokenizeGrouping, and everything else to tokenizeLiteral.
// An empty token string yields null.
// (Diff view: '-' lines are the old revision, '+' lines the new one;
// the method's tail is cut off by the next diff hunk.)
public Token lexToken(String token, IMap<String, String> stringLts) {
- if(token.equals("")) return null;
+ if (token.equals(""))
+ return null;
Token tk = Token.NIL_TOKEN;
- if(litTokens.containsKey(token)) {
+ if (litTokens.containsKey(token)) {
tk = new Token(litTokens.get(token));
} else {
- switch(token.charAt(0)) {
- case '(':
- case ')':
- case '[':
- case ']':
- case '{':
- case '}':
- tk = tokenizeGrouping(token);
- break;
- default:
- tk = tokenizeLiteral(token, stringLts);
+ switch (token.charAt(0)) {
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ tk = tokenizeGrouping(token);
+ break;
+ default:
+ tk = tokenizeLiteral(token, stringLts);
}
}
@@ -72,52 +72,52 @@ public class Tokenizer {
private Token tokenizeGrouping(String token) {
Token tk = Token.NIL_TOKEN;
- if(StringUtils.containsOnly(token, "\\" + token.charAt(0))) {
- switch(token.charAt(0)) {
- case '(':
- tk = new Token(OPAREN, token.length());
- break;
- case ')':
- tk = new Token(CPAREN, token.length());
- break;
- case '[':
- tk = new Token(OBRACKET, token.length());
- break;
- case ']':
- tk = new Token(CBRACKET, token.length());
- break;
- case '{':
- tk = new Token(OBRACE, token.length());
- break;
- case '}':
- tk = new Token(CBRACE, token.length());
- break;
- default:
- Errors.inst.printError(EK_TOK_UNGROUP, token);
- break;
+ if (StringUtils.containsOnly(token, "\\" + token.charAt(0))) {
+ switch (token.charAt(0)) {
+ case '(':
+ tk = new Token(OPAREN, token.length());
+ break;
+ case ')':
+ tk = new Token(CPAREN, token.length());
+ break;
+ case '[':
+ tk = new Token(OBRACKET, token.length());
+ break;
+ case ']':
+ tk = new Token(CBRACKET, token.length());
+ break;
+ case '{':
+ tk = new Token(OBRACE, token.length());
+ break;
+ case '}':
+ tk = new Token(CBRACE, token.length());
+ break;
+ default:
+ Errors.inst.printError(EK_TOK_UNGROUP, token);
+ break;
}
}
return tk;
}
// Whole-string hex integer literal with optional sign, e.g. "-0x1F".
// (Diff view: '-' lines are the old revision, '+' lines the new one.)
- private Pattern hexadecimalMatcher = Pattern.compile("\\A[\\-\\+]?0x[0-9A-Fa-f]+\\Z");
+ private Pattern hexadecimalMatcher = Pattern.compile("\\A[\\-\\+]?0x[0-9A-Fa-f]+\\Z");
// "Flexadecimal" literal: alphanumeric digits followed by an explicit
// one- or two-digit base after 'B', e.g. "101B2" (binary 101).
private Pattern flexadecimalMatcher = Pattern.compile("\\A[\\-\\+]?[0-9][0-9A-Za-z]+B\\d{1,2}\\Z");
// Placeholder of the form "stringLiteral<N>" referencing an interned
// string literal; group(1) captures the literal's number.
- private Pattern stringLitMatcher = Pattern.compile("\\AstringLiteral(\\d+)\\Z");
+ private Pattern stringLitMatcher = Pattern.compile("\\AstringLiteral(\\d+)\\Z");
private Token tokenizeLiteral(String token, IMap<String, String> stringLts) {
Token tk = Token.NIL_TOKEN;
- if(StringUtils.isInt(token)) {
+ if (StringUtils.isInt(token)) {
tk = new Token(INT_LIT, Long.parseLong(token));
- } else if(hexadecimalMatcher.matcher(token).matches()) {
+ } else if (hexadecimalMatcher.matcher(token).matches()) {
String newToken = token.substring(0, 1) + token.substring(token.indexOf('x'));
tk = new Token(INT_LIT, Long.parseLong(newToken.substring(2).toUpperCase(), 16));
- } else if(flexadecimalMatcher.matcher(token).matches()) {
+ } else if (flexadecimalMatcher.matcher(token).matches()) {
int parseBase = Integer.parseInt(token.substring(token.lastIndexOf('B') + 1));
- if(parseBase < Character.MIN_RADIX || parseBase > Character.MAX_RADIX) {
+ if (parseBase < Character.MIN_RADIX || parseBase > Character.MAX_RADIX) {
Errors.inst.printError(EK_TOK_INVBASE, Integer.toString(parseBase));
return Token.NIL_TOKEN;
}
@@ -130,14 +130,14 @@ public class Tokenizer {
Errors.inst.printError(EK_TOK_INVFLEX, flexNum, Integer.toString(parseBase));
return Token.NIL_TOKEN;
}
- } else if(StringUtils.isDouble(token)) {
+ } else if (StringUtils.isDouble(token)) {
tk = new Token(FLOAT_LIT, Double.parseDouble(token));
- } else if(DiceBox.isValidExpression(token)) {
+ } else if (DiceBox.isValidExpression(token)) {
tk = new Token(DICE_LIT, DiceBox.parseExpression(token));
} else {
Matcher stringLit = stringLitMatcher.matcher(token);
- if(stringLit.matches()) {
+ if (stringLit.matches()) {
int litNum = Integer.parseInt(stringLit.group(1));
eng.addStringLiteral(litNum, stringLts.get(token));