blob: a8fbcaa75702bfd25bfaa0ce76772eed5925f1fb (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
|
package bjc.dicelang.expr;

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import bjc.utils.funcdata.IList;
import bjc.utils.parserutils.splitter.ConfigurableTokenSplitter;
/*
* @TODO 10/08/18 :IntExpressions
* Add support for integer constants, and maybe floating-point ones as well
* if you feel like. Heck, you could even go for ratio constants and things
* as well.
*/
/**
* Implements the lexer for simple expression operations.
*
* @author Ben Culkin
*/
public class Lexer {
	/* Splitter used to break tokens apart on operator characters. */
	private final ConfigurableTokenSplitter split;

	/** Create a new expression lexer. */
	public Lexer() {
		split = new ConfigurableTokenSplitter(true);

		/* Grouping and arithmetic operators act as token delimiters. */
		split.addSimpleDelimiters("(", ")");
		split.addSimpleDelimiters("+", "-", "*", "/");
	}

	/**
	 * Convert a string from a input command to a series of infix tokens.
	 *
	 * @param inp
	 *        The input command.
	 *
	 * @param tks
	 *        The token state.
	 *
	 * @return A series of infix tokens representing the command.
	 */
	public Token[] lexString(final String inp, final Tokens tks) {
		/*
		 * Split tokens on runs of whitespace. Using "\\s+" (rather than a
		 * single-character class) prevents consecutive spaces/tabs from
		 * producing spurious empty tokens.
		 */
		final String[] spacedTokens = inp.split("\\s+");

		/* Tokens to return. */
		final List<Token> tokens = new ArrayList<>();

		/* Process each whitespace-separated token. */
		for (final String spacedToken : spacedTokens) {
			/* Skip empties (e.g. from leading whitespace in the input). */
			if (spacedToken.isEmpty()) continue;

			/* Split on operators. */
			final IList<String> splitTokens = split.split(spacedToken);

			/* Convert strings to tokens. */
			final IList<Token> rawTokens
					= splitTokens.map(tok -> tks.lexToken(tok, spacedToken));

			/* Add tokens to results. */
			rawTokens.forEach(tokens::add);
		}

		return tokens.toArray(new Token[0]);
	}
}
|