1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
|
package bjc.dicelang.v2;
import bjc.utils.funcdata.FunctionalList;
import bjc.utils.funcdata.FunctionalMap;
import bjc.utils.funcdata.IList;
import bjc.utils.funcdata.IMap;

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;

import static bjc.dicelang.v2.Errors.ErrorKey.*;
import static bjc.dicelang.v2.Token.Type.*;
/**
 * Shunts infix token streams into postfix (RPN) order, a variant of
 * Dijkstra's shunting-yard algorithm with dice/expression precedence
 * bands and (unfinished) unary-operator support.
 */
public class Shunter {
	/*
	 * Precedence bands. Binary operators get a band base plus a small
	 * offset, so every math operator outranks every dice operator, which
	 * in turn outranks every expression operator.
	 */
	private static final int MATH_PREC = 20;
	private static final int DICE_PREC = 10;
	private static final int EXPR_PREC = 0;

	// The binary operators, mapped to their precedence.
	private IMap<Token.Type, Integer> ops;

	// Unary operators that can only be applied to non-operator tokens
	// and yield operator tokens.
	private Set<Token.Type> unaryAdjectives;

	// Unary operators that can only be applied to operator tokens and
	// yield operator tokens.
	private Set<Token.Type> unaryAdverbs;

	// Unary operators that can only be applied to operator tokens and
	// yield data tokens.
	private Set<Token.Type> unaryGerunds;

	/**
	 * Create a new shunter with the built-in operator table.
	 */
	public Shunter() {
		ops = new FunctionalMap<>();

		unaryAdjectives = new HashSet<>();
		unaryAdverbs = new HashSet<>();
		unaryGerunds = new HashSet<>();

		unaryAdjectives.add(COERCE);

		ops.put(ADD,      0 + MATH_PREC);
		ops.put(SUBTRACT, 0 + MATH_PREC);
		ops.put(MULTIPLY, 1 + MATH_PREC);
		ops.put(IDIVIDE,  1 + MATH_PREC);
		ops.put(DIVIDE,   1 + MATH_PREC);

		ops.put(DICEGROUP,  0 + DICE_PREC);
		ops.put(DICECONCAT, 1 + DICE_PREC);
		ops.put(DICELIST,   2 + DICE_PREC);

		ops.put(LET,  0 + EXPR_PREC);
		ops.put(BIND, 1 + EXPR_PREC);
	}

	// Check whether a token is a unary operator.
	//
	// NOTE(review): no unary token types are recognized yet, so this is
	// always false; unary support is unfinished (see the @TODO in
	// shuntTokens).
	private boolean isUnary(Token tk) {
		switch(tk.type) {
		default:
			return false;
		}
	}

	// Check whether a token counts as an operator for shunting purposes.
	private boolean isOp(Token tk) {
		Token.Type ty = tk.type;

		if(ops.containsKey(ty))          return true;
		if(unaryAdjectives.contains(ty)) return true;
		if(unaryAdverbs.contains(ty))    return true;
		if(unaryGerunds.contains(ty))    return true;
		if(ty == TAGOPR)                 return true;

		return false;
	}

	/**
	 * Shunt a list of infix tokens into postfix order.
	 *
	 * @param tks      The tokens, in infix order.
	 * @param returned The list the postfix-ordered tokens are appended to.
	 *
	 * @return True on success, false if a shunting error occurred (the
	 *         error itself is reported through Errors.inst).
	 */
	public boolean shuntTokens(IList<Token> tks, IList<Token> returned) {
		Deque<Token> opStack = new ArrayDeque<>();
		Deque<Token> unaryOps = new ArrayDeque<>();
		Deque<Token> currReturned = new ArrayDeque<>();

		// True while we are collecting the operand of a unary operator.
		boolean unaryMode = false;

		for(Token tk : tks.toIterable()) {
			if(unaryMode) {
				if(isUnary(tk)) {
					// Stack nested unary operators.
					unaryOps.add(tk);
					continue;
				}

				// FIX: leave unary mode once the operand arrives.
				// Previously unaryMode was never cleared, so the next
				// iteration popped from an empty unaryOps deque and
				// threw NoSuchElementException.
				unaryMode = false;

				Token unaryOp = unaryOp = unaryOps.pop();
				Token.Type unaryType = unaryOp.type;

				if(unaryAdjectives.contains(unaryType)) {
					// Adjectives only apply to data tokens.
					if(isOp(tk)) {
						Errors.inst.printError(EK_SHUNT_NOTADV, unaryOp.toString(), tk.toString());
						return false;
					}

					// Fold the adjective into a tagged-operator token.
					Token newTok = new Token(TAGOPR);

					if(tk.type == TAGOP) {
						newTok.tokenValues = tk.tokenValues;
					} else {
						newTok.tokenValues = new FunctionalList<>();
					}

					newTok.tokenValues.add(unaryOp);

					opStack.push(newTok);
				} else if(unaryAdverbs.contains(unaryType)) {
					// @TODO finish implementing unary operators
					// this will require adding a 'backfeed' to the shunter to catch
					// tokens we missed while parsing unary operators
				}

				// NOTE(review): control deliberately falls through so
				// the operand token is also processed normally below —
				// confirm this is intended once unary support lands.
			}

			if(isUnary(tk)) {
				unaryMode = true;
				unaryOps.add(tk);
				continue;
			} else if(isOp(tk)) {
				// Pop operators of higher-or-equal precedence before
				// pushing this one.
				while(!opStack.isEmpty() && isHigherPrec(tk, opStack.peek())) {
					currReturned.addLast(opStack.pop());
				}

				opStack.push(tk);
			} else if(tk.type == OPAREN || tk.type == OBRACE) {
				opStack.push(tk);

				// Braces stay in the output as group markers.
				if(tk.type == OBRACE) currReturned.addLast(tk);
			} else if(tk.type == CPAREN || tk.type == CBRACE) {
				// Build the opener we expect to find on the stack.
				Token matching;

				if(tk.type == CPAREN) {
					matching = new Token(OPAREN, tk.intValue);
				} else {
					matching = new Token(OBRACE, tk.intValue);
				}

				if(!opStack.contains(matching)) {
					Errors.inst.printError(EK_SHUNT_NOGROUP, tk.toString(), matching.toString());
					return false;
				}

				// Flush operators down to the matching opener.
				while(!opStack.peek().equals(matching)) {
					currReturned.addLast(opStack.pop());
				}

				if(tk.type == CBRACE) {
					currReturned.addLast(tk);
				}

				// Discard the opener itself.
				opStack.pop();
			} else if(tk.type == GROUPSEP) {
				// Collect everything back to the last group marker into
				// a single token group.
				IList<Token> group = new FunctionalList<>();

				// NOTE(review): these drain from the HEAD of the deques
				// while output is appended at the tail — confirm this
				// matches the intended grouping order.
				while(currReturned.size() != 0 && !currReturned.peek().isGrouper()) {
					group.add(currReturned.pop());
				}

				while(opStack.size() != 0 && !opStack.peek().isGrouper()) {
					group.add(opStack.pop());
				}

				if(currReturned.size() == 0) {
					Errors.inst.printError(EK_SHUNT_INVSEP);
					return false;
				}

				currReturned.addLast(new Token(TOKGROUP, group));
			} else {
				// Plain data token; emit it directly.
				currReturned.addLast(tk);
			}
		}

		// Flush leftover operators
		while(!opStack.isEmpty()) {
			currReturned.addLast(opStack.pop());
		}

		for(Token tk : currReturned) {
			returned.add(tk);
		}

		return true;
	}

	// Check whether the operator on the right (stack top) should be
	// popped before pushing the incoming operator on the left.
	private boolean isHigherPrec(Token lft, Token rght) {
		Token.Type left = lft.type;
		Token.Type right = rght.type;

		// If the right token isn't a binary operator (e.g. a grouping
		// paren), nothing outranks the incoming operator.
		if(!ops.containsKey(right)) {
			return false;
		}

		int rightPrecedence = ops.get(right);

		// FIX: the left token may be an operator with no entry in ops
		// (TAGOPR, or a unary adjective such as COERCE, both accepted by
		// isOp); the unguarded ops.get(left) unboxed a null Integer and
		// threw an NPE. Treat such operators as binding tighter than any
		// binary operator, so they never pop the stack.
		int leftPrecedence = ops.containsKey(left) ? ops.get(left) : Integer.MAX_VALUE;

		return rightPrecedence >= leftPrecedence;
	}
}
|