summaryrefslogtreecommitdiff
path: root/base
diff options
context:
space:
mode:
authorBenjamin J. Culkin <bjculkin@mix.wvu.edu>2017-10-25 12:10:14 -0300
committerBenjamin J. Culkin <bjculkin@mix.wvu.edu>2017-10-25 12:10:14 -0300
commit7bda9de511a5642efb297eae98c6ea7c42b27754 (patch)
treedff1aa772b9ac088c5bd07b8d10d944cbff89f96 /base
parentf028ea6dc555fc5192a96b00b8e96e90dbf6de55 (diff)
Start switch to maven modules
Diffstat (limited to 'base')
-rw-r--r--base/.gitignore3
-rw-r--r--base/data/help/cli.help45
-rw-r--r--base/lang-desc.md59
-rw-r--r--base/lang-impl.md23
-rw-r--r--base/lang-process.md65
-rw-r--r--base/pom.xml116
-rw-r--r--base/src/bjc/dicelang/CLIArgsParser.java215
-rw-r--r--base/src/bjc/dicelang/CompilerTweaker.java102
-rw-r--r--base/src/bjc/dicelang/Define.java198
-rw-r--r--base/src/bjc/dicelang/DiceLangConsole.java320
-rw-r--r--base/src/bjc/dicelang/DiceLangEngine.java695
-rw-r--r--base/src/bjc/dicelang/Errors.java523
-rw-r--r--base/src/bjc/dicelang/Evaluator.java603
-rw-r--r--base/src/bjc/dicelang/EvaluatorResult.java220
-rw-r--r--base/src/bjc/dicelang/Node.java105
-rw-r--r--base/src/bjc/dicelang/Parser.java221
-rw-r--r--base/src/bjc/dicelang/Shunter.java380
-rw-r--r--base/src/bjc/dicelang/Token.java149
-rw-r--r--base/src/bjc/dicelang/Tokenizer.java174
-rw-r--r--base/src/bjc/dicelang/dice/CompoundDie.java60
-rw-r--r--base/src/bjc/dicelang/dice/CompoundingDie.java115
-rw-r--r--base/src/bjc/dicelang/dice/DiceBox.java310
-rw-r--r--base/src/bjc/dicelang/dice/Die.java39
-rw-r--r--base/src/bjc/dicelang/dice/DieExpression.java69
-rw-r--r--base/src/bjc/dicelang/dice/DieList.java31
-rw-r--r--base/src/bjc/dicelang/dice/ExplodingDice.java124
-rw-r--r--base/src/bjc/dicelang/dice/FudgeDie.java67
-rw-r--r--base/src/bjc/dicelang/dice/MathDie.java121
-rw-r--r--base/src/bjc/dicelang/dice/ScalarDie.java47
-rw-r--r--base/src/bjc/dicelang/dice/SimpleDie.java119
-rw-r--r--base/src/bjc/dicelang/dice/SimpleDieList.java77
-rw-r--r--base/src/bjc/dicelang/expr/Ezpr.java149
-rw-r--r--base/src/bjc/dicelang/expr/Lexer.java62
-rw-r--r--base/src/bjc/dicelang/expr/Parser.java147
-rw-r--r--base/src/bjc/dicelang/expr/Shunter.java110
-rw-r--r--base/src/bjc/dicelang/expr/Token.java88
-rw-r--r--base/src/bjc/dicelang/expr/TokenType.java56
-rw-r--r--base/src/bjc/dicelang/expr/Tokens.java96
-rw-r--r--base/src/bjc/dicelang/scl/StreamControlConsole.java75
-rw-r--r--base/src/bjc/dicelang/scl/StreamControlEngine.java546
-rw-r--r--base/src/bjc/dicelang/scl/StreamEngine.java266
-rw-r--r--base/src/bjc/dicelang/util/ResourceLoader.java43
-rw-r--r--base/todos.txt8
43 files changed, 7041 insertions, 0 deletions
diff --git a/base/.gitignore b/base/.gitignore
new file mode 100644
index 0000000..e8c76f2
--- /dev/null
+++ b/base/.gitignore
@@ -0,0 +1,3 @@
+/bin/
+/target/
+/tags
diff --git a/base/data/help/cli.help b/base/data/help/cli.help
new file mode 100644
index 0000000..433d5de
--- /dev/null
+++ b/base/data/help/cli.help
@@ -0,0 +1,45 @@
+Help for DiceLang v0.2 CLI arguments:
+ -h or --help Show help
+ Shows this help message
+
+ -d or --debug Turn on debug mode.
+ Turns on debug mode, which prints additional information that
+ may be useful for finding errors.
+
+ -nd or --no-debug Turn off debug mode.
+ Turns off debug mode.
+
+ -po or --postfix Turn on postfix mode.
+ Turns on postfix mode, which disables the shunter.
+
+ -npo or --no-postfix Turn off postfix mode.
+ Turns off postfix mode.
+
+ -pr or --prefix Turn on prefix mode.
+ Turns on prefix mode, which reverses the expression instead of
+ shunting it.
+
+ -npr or --no-prefix Turn off prefix mode.
+ Turns off prefix mode.
+
+ -se or --step-eval Turn on step-eval mode.
+ Turns on step-evaluation, which shows the evaluation process
+ step-by-step. Currently slightly broken.
+
+ -nse or --no-step-eval Turn off step-eval mode.
+ Turns off step-evaluation.
+
+ -D or --define <name> [<val>] Define a constant
+ Defines name as a textual alias for val. If val isn't provided,
+ name is deleted wherever it occurs. These constants are applied
+ once to the input lines with a priority of 5
+
+ -df or --define-file <filename> Load defines from a file
+ Loads defines from a file. Consult 'define-file' in the DiceLang
+ help system for details on the format.
+
+ -ctf or --compiler-tweak-file <filename> Load compiler tweaks
+ from a file
+ Loads a series of compiler tweaks from a file. Consult
+ 'compiler-tweak-file' in the DiceLang help system for details
+ on the format.
diff --git a/base/lang-desc.md b/base/lang-desc.md
new file mode 100644
index 0000000..fade9a0
--- /dev/null
+++ b/base/lang-desc.md
@@ -0,0 +1,59 @@
+# Dice-Lang Language Description
+Dice lang was originally just a program for rolling
+patterns of dice. However, through some effort
+and pushing a shunting-yard parser farther than
+it probably should have gone, it became a language.
+It's still missing some things, but it's getting there.
+
+## Basic Syntax
+You can use it like a 4-function calculator.
+```
+1+1
+-> 2
+1+1
+2+2*2+2
+-> 8
+```
+However, we don't support floating point (numbers or math)
+```
+1.1
+-> ERROR: Floating point literals are not supported
+10/3
+-> 3
+```
+We do, however, support dice literals
+```
+1d6
+-> 6
+1d6
+-> 3
+```
+These can be treated as numbers, but won't get turned into
+numbers until you actually ask them to turn into numbers.
+
+## Variables and Assignment
+There are variables, and you can assign things to them
+```
+test := 1
+-> 1
+```
+When you assign a variable, its current value is mentioned.
+To make sure that dice behave correctly, you can bind
+them to a variable
+```
+die := 1d6
+-> 5
+die
+-> 3
+```
+There exists a meta-variable 'last' whose value is always the
+result of the last expression.
+```
+test := 1d6*2d8
+-> 9
+last
+-> 30
+```
+We also have let, for binding things in the context of an
+expression. However, it is not as useful at the moment, because blocks don't exist yet.
+## Arrays
diff --git a/base/lang-impl.md b/base/lang-impl.md
new file mode 100644
index 0000000..c6c2ce2
--- /dev/null
+++ b/base/lang-impl.md
@@ -0,0 +1,23 @@
+# Language implementation details
+First, a command is read from the user, and
+checked to see if it has any interpreter pragmas
+in it. If so, the interpreter pragma is handled
+and we move onto the next command.
+
+Next, the command is prepared for parsing.
+This involves 4 steps
+1. Convert the command into tokens
+2. Split operators from tokens. This means
+ converting tokens like 2+2 into the three tokens
+ 2, + and 2
+3. Deaffix tokens. This means detaching brackets
+   and parentheses from their attached tokens.
+4. Remove blank tokens
+
+Next, is parsing. This is just a modified version
+of the shunting-yard algorithm, with the
+main modification being it properly handles
+multiple nesting levels of parentheses
+
+Then, the AST is created from the parsed
+string. \ No newline at end of file
diff --git a/base/lang-process.md b/base/lang-process.md
new file mode 100644
index 0000000..1175fca
--- /dev/null
+++ b/base/lang-process.md
@@ -0,0 +1,65 @@
+# Top level reader loop
+First, a command is read in from the user under a prompt
+
+Next, it is checked if the first token of the command (split on " ")
+corresponds to an action name
+* If it does, the corresponding action is executed, and the next command is
+ read
+* If it doesn't, the command is parsed as a language command and executed
+
+This continues until the command "quit" (case-insensitive) is read
+
+# Known actions
+The currently implemented actions are:
+* env: print out the contents of the environment. Takes no arguments
+* inline: inline variables out of the definition of specified variables. Takes
+ at least two arguments
+ * The first is the name of the variable to do inlining in
+ * The second is the name of the variable to bind the inlined expression to
+ * The third and following are the names of the variables to inline in the
+ variable you are inlining. If you don't give any, it will inline every
+ variable
+
+## Details on inlining
+The way the inlining process works is simple. The tree for the variable to be
+inlined is read, and then each variable reference is inlined if it is marked as
+one of the variable references to inline. This only occurs one layer deep
+
+# Parsing language commands
+Once it is decided to parse a command as a language command, it goes through
+four steps:
+* Preparation
+* AST Building
+* AST Transformation
+* AST Evaluation
+
+# Command preparation
+Command preparation means turning the raw space-separated tokens into tokens
+suitable for feeding the AST builder. It involves the following steps:
+* The first is operator expansion, which turns a token like 4+4 into the three
+ tokens 4, +, and 4
+* The next is token deaffixation which turns a token like (4 into the tokens (
+ and 4. However, ((4 will become (( 4, not ( ( 4, because (( is a different
+ nesting level than (
+* Next, any blank tokens that have been created as a result of the process are
+ disposed of
+* Finally, the entire expression is shunted, turning it from infix notation to
+ postfix notation. The only real special thing about this is that (( is a
+ different nesting level than (. The token precedence levels are (sorted from
+ lower to higher precedence):
+ * Math precedence
+ * + & -
+ * * & /
+ *
+ * Dice precedence
+ * d
+ * c
+ * Expression precedence
+ * =>
+ * :=
+
+# AST Building
+AST building is a slightly more complex process, as the tree is traversed
+twice: once to build the tree, and then again to convert the tokens from
+strings into data tokens. The only really finicky part about the first tree is
+arrays, because arrays are still in infix form at this point.
diff --git a/base/pom.xml b/base/pom.xml
new file mode 100644
index 0000000..0fb24d7
--- /dev/null
+++ b/base/pom.xml
@@ -0,0 +1,116 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>bjc</groupId>
+ <artifactId>dicelang-Parent</artifactId>
+ <version>1.0.0</version>
+ </parent>
+
+ <artifactId>dice-lang</artifactId>
+ <packaging>jar</packaging>
+
+ <properties>
+ <main.class1>bjc.dicelang.scl.StreamControlConsole</main.class1>
+ <main.class>bjc.dicelang.DiceLangConsole</main.class>
+ </properties>
+
+ <build>
+ <sourceDirectory>src</sourceDirectory>
+
+ <plugins>
+ <plugin>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.7.0</version>
+
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.5.0</version>
+
+ <configuration>
+ <executable>java</executable>
+
+ <arguments>
+ <argument>-agentlib:jdwp=transport=dt_socket,server=y,suspend=n</argument>
+ <argument>-classpath</argument>
+
+ <classpath/>
+
+ <argument>${main.class}</argument>
+ </arguments>
+
+ <mainClass>${main.class}</mainClass>
+ </configuration>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>3.0.0</version>
+
+ <configuration>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+
+ <archive>
+ <manifest>
+ <mainClass>${main.class}</mainClass>
+ </manifest>
+ </archive>
+ </configuration>
+
+ <executions>
+ <execution>
+ <id>make-assembly</id>
+
+ <phase>package</phase>
+
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+
+ <resources>
+ <resource>
+ <directory>data/</directory>
+
+ <includes>
+ <include>**/*.txt</include>
+ <include>**/*.help</include>
+ </includes>
+ </resource>
+ </resources>
+ </build>
+
+ <repositories>
+ <repository>
+ <id>jline</id>
+ <name>JLine Project Repository</name>
+ <url>http://jline.sourceforge.net/m2repo</url>
+ </repository>
+ </repositories>
+
+ <dependencies>
+ <dependency>
+ <groupId>bjc</groupId>
+ <artifactId>BJC-Utils2</artifactId>
+ <version>1.0.0</version>
+ </dependency>
+
+ <dependency>
+ <groupId>jline</groupId>
+ <artifactId>jline</artifactId>
+ <version>0.9.9</version>
+ </dependency>
+ </dependencies>
+</project>
diff --git a/base/src/bjc/dicelang/CLIArgsParser.java b/base/src/bjc/dicelang/CLIArgsParser.java
new file mode 100644
index 0000000..54061bd
--- /dev/null
+++ b/base/src/bjc/dicelang/CLIArgsParser.java
@@ -0,0 +1,215 @@
+package bjc.dicelang;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_CLI_INVDFNTYPE;
+import static bjc.dicelang.Errors.ErrorKey.EK_CLI_MISARG;
+import static bjc.dicelang.Errors.ErrorKey.EK_CLI_UNARG;
+import static bjc.dicelang.Errors.ErrorKey.EK_MISC_IOEX;
+import static bjc.dicelang.Errors.ErrorKey.EK_MISC_NOFILE;
+
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Scanner;
+
+import bjc.dicelang.util.ResourceLoader;
+
+/**
+ * Parse CLI arguments.
+ *
+ * @author Ben Culkin
+ *
+ */
+public class CLIArgsParser {
+ /**
+ * Parse the provided set of CLI arguments.
+ *
+ * @param args
+ * The CLI arguments to parse.
+ * @param eng
+ * The engine to affect with parsing.
+ *
+ * @return Whether or not to continue to the DiceLang repl.
+ */
+ public static boolean parseArgs(final String[] args, final DiceLangEngine eng) {
+ if (args.length < 0) {
+ return true;
+ }
+
+ if (args.length == 1 && (args[0].equals("--help") || args[0].equals("-h"))) {
+ for (final String lne : ResourceLoader.loadHelpFile("cli")) {
+ System.out.println(lne);
+ }
+
+ System.exit(0);
+ }
+
+ for (int i = 0; i < args.length; i++) {
+ final String arg = args[i];
+
+ /*
+ * @TODO 10/08/17 Ben Culkin :CLIArgRefactor
+ * Use whatever library gets added to BJC-Utils for
+ * this, and extend these to do more things.
+ */
+ switch (arg) {
+ case "-d":
+ case "--debug":
+ if (!eng.toggleDebug()) {
+ eng.toggleDebug();
+ }
+ break;
+ case "-nd":
+ case "--no-debug":
+ if (eng.toggleDebug()) {
+ eng.toggleDebug();
+ }
+ break;
+ case "-po":
+ case "--postfix":
+ if (!eng.togglePostfix()) {
+ eng.togglePostfix();
+ }
+ break;
+ case "-npo":
+ case "--no-postfix":
+ if (eng.togglePostfix()) {
+ eng.togglePostfix();
+ }
+ break;
+ case "-pr":
+ case "--prefix":
+ if (!eng.togglePrefix()) {
+ eng.togglePrefix();
+ }
+ break;
+ case "-npr":
+ case "--no-prefix":
+ if (eng.togglePrefix()) {
+ eng.togglePrefix();
+ }
+ break;
+ case "-se":
+ case "--stepeval":
+ if (!eng.toggleStepEval()) {
+ eng.toggleStepEval();
+ }
+ break;
+ case "-nse":
+ case "--no-stepeval":
+ if (eng.toggleStepEval()) {
+ eng.toggleStepEval();
+ }
+ break;
+ case "-D":
+ case "--define":
+ i = simpleDefine(i, args, eng);
+ if (i == -1) {
+ return false;
+ }
+ break;
+ case "-df":
+ case "--define-file":
+ i = defineFile(i, args, eng);
+ if (i == -1) {
+ return false;
+ }
+ break;
+ case "-ctf":
+ case "--compiler-tweak-file":
+ /* @NOTE
+ * Not yet implemented.
+ */
+ default:
+ Errors.inst.printError(EK_CLI_UNARG, arg);
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ /* Handle parsing a simple define. */
+ private static int simpleDefine(final int i, final String[] args,
+ final DiceLangEngine eng) {
+ /* :DefineRefactor */
+
+ if (i >= args.length - 1) {
+ Errors.inst.printError(EK_CLI_MISARG, "define");
+ return -1;
+ }
+
+ if (i >= args.length - 2) {
+ final Define dfn = new Define(5, false, false, false, null, args[i + 1],
+ Arrays.asList(""));
+
+ if (dfn.inError) {
+ return -1;
+ }
+
+ eng.addLineDefine(dfn);
+ return i + 1;
+ }
+
+ final Define dfn = new Define(5, false, false, false, null, args[i + 1],
+ Arrays.asList(args[i + 2]));
+
+ if (dfn.inError) {
+ return -1;
+ }
+
+ eng.addLineDefine(dfn);
+ return i + 2;
+ }
+
+ /* Load a series of defines from a file. */
+ private static int defineFile(final int i, final String[] args,
+ final DiceLangEngine eng) {
+ if (i >= args.length - 1) {
+ Errors.inst.printError(EK_CLI_MISARG, "define-file");
+ return -1;
+ }
+
+ final String fName = args[i + 1];
+
+ try (FileInputStream fis = new FileInputStream(fName)) {
+ try (Scanner scan = new Scanner(fis)) {
+ while (scan.hasNextLine()) {
+ final String ln = scan.nextLine();
+
+ final Define dfn = parseDefine(ln.substring(ln.indexOf(' ')));
+
+ if (dfn == null || dfn.inError) {
+ return -1;
+ }
+
+ if (ln.startsWith("line")) {
+ eng.addLineDefine(dfn);
+ } else if (ln.startsWith("token")) {
+ eng.addTokenDefine(dfn);
+ } else {
+ final String defnType = ln.substring(0, ln.indexOf(' '));
+
+ Errors.inst.printError(EK_CLI_INVDFNTYPE, defnType);
+ return -1;
+ }
+ }
+ }
+ } catch (final FileNotFoundException fnfex) {
+ Errors.inst.printError(EK_MISC_NOFILE, fName);
+ return -1;
+ } catch (final IOException ioex) {
+ Errors.inst.printError(EK_MISC_IOEX, fName);
+ return -1;
+ }
+
+ return i + 1;
+ }
+
+ private static Define parseDefine(final String ln) {
+ final Define res = null;
+
+ /* :DefineRefactor */
+ return res;
+ }
+}
diff --git a/base/src/bjc/dicelang/CompilerTweaker.java b/base/src/bjc/dicelang/CompilerTweaker.java
new file mode 100644
index 0000000..1154be4
--- /dev/null
+++ b/base/src/bjc/dicelang/CompilerTweaker.java
@@ -0,0 +1,102 @@
+package bjc.dicelang;
+
+import bjc.utils.parserutils.splitter.ConfigurableTokenSplitter;
+
+/*
+ * @TODO 10/09/17 Ben Culkin :CompilerTweaking
+ * Expand this to allow tweaking more things about the compiler.
+ */
+/**
+ * Contains methods for customizing the DiceLang and SCL compilers.
+ *
+ * @author Ben Culkin
+ */
+public class CompilerTweaker {
+ /* Bits of the compiler necessary */
+ private final DiceLangEngine eng;
+ private final ConfigurableTokenSplitter opExpander;
+
+ /**
+ * Create a new compiler tweaker.
+ *
+ * @param engine
+ * The engine to tweak.
+ */
+ public CompilerTweaker(final DiceLangEngine engine) {
+ eng = engine;
+
+ this.opExpander = engine.opExpander;
+ }
+
+ /**
+ * Add a string literal to the compiler's internal banks.
+ *
+ * @param val
+ * The string literal to add.
+ *
+ * @return The key into the string literal table for this string.
+ */
+ public int addStringLiteral(final String val) {
+ eng.addStringLiteral(eng.nextLiteral, val);
+
+ eng.nextLiteral += 1;
+ return eng.nextLiteral;
+ }
+
+ /**
+ * Add a line defn to the compiler.
+ *
+ * @param dfn
+ * The defn to add.
+ */
+ public void addLineDefine(final Define dfn) {
+ eng.addLineDefine(dfn);
+ }
+
+ /**
+ * Add a token defn to the compiler.
+ *
+ * @param dfn
+ * The defn to add.
+ */
+ public void addTokenDefine(final Define dfn) {
+ eng.addTokenDefine(dfn);
+ }
+
+ /**
+ * Adds delimiters that are expanded from tokens.
+ *
+ * @param delims
+ * The delimiters to expand on.
+ */
+ public void addDelimiter(final String... delims) {
+ opExpander.addSimpleDelimiters(delims);
+ }
+
+ /**
+ * Adds multi-character delimiters that are expanded from tokens.
+ *
+ * @param delims
+ * The multi-character delimiters to expand on.
+ */
+ public void addMultiDelimiter(final String... delims) {
+ opExpander.addMultiDelimiters(delims);
+ }
+
+ /**
+ * Make delimiter changes visible to the compiler.
+ */
+ public void compile() {
+ opExpander.compile();
+ }
+
+ /**
+ * Change the max no. of times defines are allowed to recur.
+ *
+ * @param times
+ * The number of times to allow defines to recur.
+ */
+ public static void setDefineRecurLimit(final int times) {
+ Define.MAX_RECURS = times;
+ }
+}
diff --git a/base/src/bjc/dicelang/Define.java b/base/src/bjc/dicelang/Define.java
new file mode 100644
index 0000000..5524477
--- /dev/null
+++ b/base/src/bjc/dicelang/Define.java
@@ -0,0 +1,198 @@
+package bjc.dicelang;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_DFN_PREDSYN;
+import static bjc.dicelang.Errors.ErrorKey.EK_DFN_RECUR;
+import static bjc.dicelang.Errors.ErrorKey.EK_DFN_SRCSYN;
+
+import java.util.Iterator;
+import java.util.function.UnaryOperator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+
+import bjc.utils.data.CircularIterator;
+
+/*
+ * @TODO 10/09/17 Ben Culkin :DefineRefactor
+ * Consider replacing this with the defines package from BJC-Utils.
+ */
+/**
+ * A regular expression based pre-processor define.
+ *
+ * @author EVE
+ *
+ */
+public class Define implements UnaryOperator<String>, Comparable<Define> {
+ /**
+ * The define type.
+ *
+ * @author EVE
+ *
+ */
+ public static enum Type {
+ /** Match on lines. */
+ LINE,
+ /** Match on tokens. */
+ TOKEN
+ }
+
+ /** The max amount of times to recur on expansions. */
+ public static int MAX_RECURS = 10;
+
+ /** The priority of this definition. */
+ public final int priority;
+
+ /** Whether or not this definition is in error. */
+ public final boolean inError;
+
+ /* Whether this define is recurring. */
+ private boolean doRecur;
+ /* Whether this define is applied multiple times per unit. */
+ private boolean subType;
+
+ /* The pattern that needs to match to apply this. */
+ private Pattern predicate;
+ /* The pattern to use to find everything to replace. */
+ private Pattern searcher;
+
+ /* The array of replacement strings to use. */
+ private Iterator<String> replacers;
+ /* The current replacement string to use. */
+ private String replacer;
+
+ /**
+ * Create a new define.
+ *
+ * @param priorty
+ * The priority of the define.
+ *
+ * @param isSub
+ * Whether or not this is a 'sub-define'
+ *
+ * @param recur
+ * Whether this define is recursive or not.
+ *
+ * @param isCircular
+ * Whether this define is circular or not.
+ *
+ * @param predicte
+ * The string to use as a predicate.
+ *
+ * @param searchr
+ * The string to use as a search.
+ *
+ * @param replacrs
+ * The source for replacement strings.
+ */
+ public Define(final int priorty, final boolean isSub, final boolean recur,
+ final boolean isCircular,
+ final String predicte, final String searchr, final Iterable<String> replacrs) {
+ priority = priorty;
+ doRecur = recur;
+ subType = isSub;
+
+ /* Only try to compile non-null predicates */
+ if (predicte != null) {
+ try {
+ predicate = Pattern.compile(predicte);
+ } catch (final PatternSyntaxException psex) {
+ Errors.inst.printError(EK_DFN_PREDSYN, psex.getMessage());
+ inError = true;
+ return;
+ }
+ }
+
+ /* Compile the search pattern */
+ try {
+ searcher = Pattern.compile(searchr);
+ } catch (final PatternSyntaxException psex) {
+ Errors.inst.printError(EK_DFN_SRCSYN, psex.getMessage());
+ inError = true;
+ return;
+ }
+
+ inError = false;
+
+ /* Check whether or not we do sub-replacements */
+ if (subType) {
+ if (replacrs.iterator().hasNext()) {
+ replacers = new CircularIterator<>(replacrs, isCircular);
+ } else {
+ replacers = null;
+ }
+ } else {
+ final Iterator<String> itr = replacrs.iterator();
+
+ if (itr.hasNext()) {
+ replacer = itr.next();
+ } else {
+ replacer = "";
+ }
+ }
+ }
+
+ @Override
+ public String apply(final String tok) {
+ if (inError) {
+ return tok;
+ }
+
+ if (predicate != null) {
+ if (!predicate.matcher(tok).matches()) {
+ return tok;
+ }
+ }
+
+ String strang = doPass(tok);
+
+ if (doRecur) {
+ int recurCount = 0;
+
+ if (strang.equals(tok)) {
+ return strang;
+ }
+
+ final String oldStrang = strang;
+
+ do {
+ strang = doPass(tok);
+ recurCount += 1;
+ } while (!strang.equals(oldStrang) && recurCount < MAX_RECURS);
+
+ if (recurCount >= MAX_RECURS) {
+ Errors.inst.printError(EK_DFN_RECUR, Integer.toString(MAX_RECURS), tok, strang);
+ return strang;
+ }
+ }
+
+ return strang;
+ }
+
+ /* Apply a definition pass. */
+ private String doPass(final String tok) {
+ final Matcher searcherMatcher = searcher.matcher(tok);
+
+ if (subType) {
+ final StringBuffer sb = new StringBuffer();
+
+ while (searcherMatcher.find()) {
+ if (replacers == null) {
+ searcherMatcher.appendReplacement(sb, "");
+ } else {
+ final String replac = replacers.next();
+ searcherMatcher.appendReplacement(sb, replac);
+ }
+ }
+
+ searcherMatcher.appendTail(sb);
+ return sb.toString();
+ }
+
+ return searcherMatcher.replaceAll(replacer);
+ }
+
+ @Override
+ public int compareTo(final Define o) {
+ return priority - o.priority;
+ }
+}
diff --git a/base/src/bjc/dicelang/DiceLangConsole.java b/base/src/bjc/dicelang/DiceLangConsole.java
new file mode 100644
index 0000000..296874f
--- /dev/null
+++ b/base/src/bjc/dicelang/DiceLangConsole.java
@@ -0,0 +1,320 @@
+package bjc.dicelang;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_CONS_INVDEFINE;
+import static bjc.dicelang.Errors.ErrorKey.EK_CONS_INVPRAG;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import jline.ConsoleReader;
+import jline.Terminal;
+
+/**
+ * CLI interface to DiceLang
+ *
+ * @author EVE
+ *
+ */
+public class DiceLangConsole {
+ /* The number of commands executed so far. */
+ private int commandNumber;
+ /* The engine that executes commands. */
+ private final DiceLangEngine eng;
+ /* The place to read input from. */
+ private ConsoleReader read;
+
+ /**
+ * Create a new console.
+ *
+ * @param args
+ * The CLI args for the console.
+ */
+ public DiceLangConsole(final String[] args) {
+ commandNumber = 0;
+ eng = new DiceLangEngine();
+
+ if (!CLIArgsParser.parseArgs(args, eng)) {
+ System.exit(1);
+ }
+
+ Terminal.setupTerminal();
+ }
+
+ /** Run the console. */
+ public void run() {
+ /* Set up console. */
+ try {
+ read = new ConsoleReader();
+ } catch (final IOException ioex) {
+ System.out.println("ERROR: Console init failed");
+ return;
+ }
+
+ /* Print greeting. */
+ System.out.println("dice-lang v0.2");
+ String comm = null;
+
+ /* Read initial command. */
+ try {
+ comm = read.readLine(String.format("(%d) dice-lang> ", commandNumber));
+ } catch (final IOException ioex) {
+ System.out.println("ERROR: I/O failed");
+ return;
+ }
+
+ /* Run commands. */
+ /* @NOTE
+ * Should switch this to a do-while loop to reduce code
+ * duplication.
+ */
+ while (!comm.equals("quit") && !comm.equals("exit")) {
+ if (comm.startsWith("pragma")) {
+ /* Run pragmas. */
+ final boolean success = handlePragma(comm.substring(7));
+
+ if (success) {
+ System.out.println("Pragma completed succesfully");
+ } else {
+ System.out.println("Pragma execution failed");
+ }
+ } else {
+ /* Run commands. */
+ if (eng.debugMode) {
+ System.out.printf("\tRaw command: %s\n", comm);
+ }
+
+ final boolean success = eng.runCommand(comm);
+
+ if (success) {
+ System.out.println("Command completed succesfully");
+ } else {
+ System.out.println("Command execution failed");
+ }
+
+ commandNumber += 1;
+ }
+
+ /* Read the next command. */
+ try {
+ comm = read.readLine(String.format("(%d) dice-lang> ", commandNumber));
+ } catch (final IOException ioex) {
+ System.out.println("ERROR: I/O failed");
+ return;
+ }
+ }
+ }
+
+ /* Handle running pragmas. */
+ private boolean handlePragma(final String pragma) {
+ if (eng.debugMode) {
+ System.out.println("\tRaw pragma: " + pragma);
+ }
+
+ /* Grab the name from the arguments. */
+ String pragmaName = null;
+ final int firstIndex = pragma.indexOf(' ');
+
+ /* Handle argless pragmas. */
+ if (firstIndex == -1) {
+ pragmaName = pragma;
+ } else {
+ pragmaName = pragma.substring(0, firstIndex);
+ }
+
+ /* Run pragmas. */
+ /*
+ * @TODO 10/09/17 Ben Culkin :PragmaRefactor
+ * Swap to using something that makes it easier to add
+ * pragmas.
+ */
+ switch (pragmaName) {
+ case "debug":
+ System.out.println("\tDebug mode is now " + eng.toggleDebug());
+ break;
+ case "postfix":
+ System.out.println("\tPostfix mode is now " + eng.togglePostfix());
+ break;
+ case "prefix":
+ System.out.println("\tPrefix mode is now " + eng.togglePrefix());
+ break;
+ case "stepeval":
+ System.out.println("\tStepeval mode is now" + eng.toggleStepEval());
+ break;
+ case "define":
+ return defineMode(pragma.substring(7));
+ case "help":
+ return helpMode(pragma.substring(5));
+ default:
+ Errors.inst.printError(EK_CONS_INVPRAG, pragma);
+ return false;
+ }
+
+ return true;
+ }
+
+ /* Run a help mode. */
+ private static boolean helpMode(final String pragma) {
+ /* Get the help topic. */
+ switch (pragma.trim()) {
+ case "help":
+ System.out.println("\tGet help on pragmas");
+ break;
+ case "debug":
+ System.out.println("\tToggle debug mode. (Output stage results)");
+ break;
+ case "postfix":
+ System.out.println("\tToggle postfix mode. (Don't shunt tokens)");
+ break;
+ case "prefix":
+ System.out.println("\tToggle prefix mode. (Reverse token order instead of shunting)");
+ break;
+ case "stepeval":
+ System.out.println("\tToggle stepeval mode. (Print out evaluation progress)");
+ break;
+ case "define":
+ System.out.println("\tAdd a macro rewrite directive.");
+ System.out.println("\tdefine <priority> <type> <recursion> <guard> <circular> <patterns>...");
+ break;
+ default:
+ System.out.println("\tNo help available for pragma " + pragma);
+ }
+ /* Help always works */
+ return true;
+ }
+
+ /* Matches slash-delimited strings (like /text/ or /text\/text/). */
+ private final Pattern slashPattern = Pattern.compile("/((?:\\\\.|[^/\\\\])*)/");
+
+ /* Parse a define macro. */
+ private boolean defineMode(final String defineText) {
+ /* Grab all of the separator spaces. */
+ final int firstIndex = defineText.indexOf(' ');
+ final int secondIndex = defineText.indexOf(' ', firstIndex + 1);
+ final int thirdIndex = defineText.indexOf(' ', secondIndex + 1);
+ final int fourthIndex = defineText.indexOf(' ', thirdIndex + 1);
+ final int fifthIndex = defineText.indexOf(' ', fourthIndex + 1);
+ final int sixthIndex = defineText.indexOf(' ', fifthIndex + 1);
+
+ /*
+ * Error if we got something we didn't need, or didn't get
+ * something we need.
+ */
+ if (firstIndex == -1) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no priority)");
+ return false;
+ } else if (secondIndex == -1) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no define type)");
+ return false;
+ } else if (thirdIndex == -1) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no recursion type)");
+ return false;
+ } else if (fourthIndex == -1) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no guard type)");
+ return false;
+ } else if (fifthIndex == -1) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no circularity)");
+ return false;
+ } else if (sixthIndex == -1) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no patterns)");
+ return false;
+ }
+
+ /* Get the priority and define type. */
+ final int priority = Integer.parseInt(defineText.substring(0, firstIndex));
+ final String defineType = defineText.substring(firstIndex + 1, secondIndex);
+
+ Define.Type type;
+ boolean subMode = false;
+
+ /* Parse the define type. */
+ switch (defineType) {
+ case "line":
+ type = Define.Type.LINE;
+ break;
+ case "token":
+ type = Define.Type.TOKEN;
+ break;
+ case "subline":
+ type = Define.Type.LINE;
+ subMode = true;
+ break;
+ case "subtoken":
+ type = Define.Type.TOKEN;
+ subMode = true;
+ break;
+ default:
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(unknown type)");
+ return false;
+ }
+
+ /* Do we want this to be a recursive pattern? */
+ final boolean doRecur = defineText.substring(secondIndex + 1, thirdIndex)
+ .equalsIgnoreCase("true");
+ /* Do we want this pattern to have a guard? */
+ final boolean hasGuard = defineText.substring(thirdIndex + 1, fourthIndex)
+ .equalsIgnoreCase("true");
+ /* Do we want this pattern to use circular replacements. */
+ final boolean isCircular = defineText.substring(thirdIndex + 1, fourthIndex)
+ .equalsIgnoreCase("true");
+
+ /* The part of the string that contains patterns. */
+ final String pats = defineText.substring(fifthIndex + 1).trim();
+ final Matcher patMatcher = slashPattern.matcher(pats);
+ String guardPattern = null;
+
+ if (hasGuard) {
+ /* Grab the guard pattern. */
+ if (!patMatcher.find()) {
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no guard pattern)");
+ return false;
+ }
+
+ guardPattern = patMatcher.group(1);
+ }
+
+ if (!patMatcher.find()) {
+ /* Grab the search pattern. */
+ Errors.inst.printError(EK_CONS_INVDEFINE, "(no search pattern)");
+ return false;
+ }
+
+ final String searchPattern = patMatcher.group(1);
+ final List<String> replacePatterns = new LinkedList<>();
+
+ while (patMatcher.find()) {
+ /* Grab the replacer patterns. */
+ replacePatterns.add(patMatcher.group(1));
+ }
+
+ final Define dfn = new Define(priority, subMode, doRecur,
+ isCircular, guardPattern, searchPattern,
+ replacePatterns);
+
+ if (dfn.inError) {
+ return false;
+ }
+
+ /* Add the define to the proper place. */
+ if (type == Define.Type.LINE) {
+ eng.addLineDefine(dfn);
+ } else {
+ eng.addTokenDefine(dfn);
+ }
+
+ return true;
+ }
+
+ /**
+ * Main method.
+ *
+ * @param args
+ * CLI arguments.
+ */
+ public static void main(final String[] args) {
+ final DiceLangConsole console = new DiceLangConsole(args);
+ console.run();
+ }
+}
diff --git a/base/src/bjc/dicelang/DiceLangEngine.java b/base/src/bjc/dicelang/DiceLangEngine.java
new file mode 100644
index 0000000..71e5ee8
--- /dev/null
+++ b/base/src/bjc/dicelang/DiceLangEngine.java
@@ -0,0 +1,695 @@
+package bjc.dicelang;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_ENG_NOCLOSING;
+import static bjc.dicelang.Errors.ErrorKey.EK_ENG_NOOPENING;
+import static bjc.dicelang.Token.Type.CBRACE;
+import static bjc.dicelang.Token.Type.CBRACKET;
+import static bjc.dicelang.Token.Type.CPAREN;
+import static bjc.dicelang.Token.Type.OBRACE;
+import static bjc.dicelang.Token.Type.OBRACKET;
+import static bjc.dicelang.Token.Type.OPAREN;
+
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import bjc.dicelang.scl.StreamEngine;
+import bjc.utils.data.ITree;
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.funcdata.FunctionalMap;
+import bjc.utils.funcdata.FunctionalStringTokenizer;
+import bjc.utils.funcdata.IList;
+import bjc.utils.funcdata.IMap;
+import bjc.utils.funcutils.ListUtils;
+import bjc.utils.parserutils.TokenUtils;
+import bjc.utils.parserutils.splitter.ConfigurableTokenSplitter;
+
+/**
+ * Implements the orchestration necessary for processing DiceLang commands.
+ *
+ * @author Ben Culkin
+ */
+public class DiceLangEngine {
+ /* Logger. */
+ private static final Logger LOG = Logger.getLogger(DiceLangEngine.class.getName());
+
+ /*
+ * The random fields that are package private instead of private-private
+ * are for the benefit of the tweaker, so that it can mess around with
+ * them.
+ */
+
+ /* Split tokens around operators with regex */
+ ConfigurableTokenSplitter opExpander;
+
+ /* ID for generation. */
+ int nextLiteral;
+
+ /* Debug indicator. */
+ public boolean debugMode;
+ /* Should we do shunting? */
+ private boolean postfixMode;
+ /* Should we reverse the token stream? */
+ private boolean prefixMode;
+ /* Should we do step-by-step evaluation? */
+ private boolean stepEval;
+
+ /* Shunter for token shunting. */
+ Shunter shunt;
+ /* Tokenizer for tokenizing. */
+ Tokenizer tokenzer;
+ /* Parser for tree construction. */
+ Parser parsr;
+ /* Evaluator for evaluating. */
+ Evaluator eval;
+
+ /* Tables for various things. */
+ /**
+ * The symbol table.
+ */
+ public final IMap<Integer, String> symTable;
+
+ /* String literal tables */
+ private final IMap<Integer, String> stringLits;
+ private final IMap<String, String> stringLiterals;
+
+ /* Lists of defns. */
+ private final IList<Define> lineDefns;
+ private final IList<Define> tokenDefns;
+
+ /* Are defns currently sorted by priority? */
+ private boolean defnsSorted;
+
+ /* Stream engine for processing streams. */
+ StreamEngine streamEng;
+
+	/**
+	 * Create a new DiceLang engine.
+	 *
+	 * Note that debug mode starts out enabled; postfix, prefix and
+	 * step-eval modes all start out disabled.
+	 */
+	public DiceLangEngine() {
+		/* Initialize defns. */
+		lineDefns = new FunctionalList<>();
+		tokenDefns = new FunctionalList<>();
+		defnsSorted = true;
+
+		/* Initialize tables. */
+		symTable = new FunctionalMap<>();
+		stringLits = new FunctionalMap<>();
+		stringLiterals = new FunctionalMap<>();
+
+		/*
+		 * Initialize operator expander with the grouping and operator
+		 * delimiters tokens get split around.
+		 */
+		opExpander = new ConfigurableTokenSplitter(true);
+		opExpander.addMultiDelimiters("(", ")");
+		opExpander.addMultiDelimiters("[", "]");
+		opExpander.addMultiDelimiters("{", "}");
+		opExpander.addSimpleDelimiters(":=");
+		opExpander.addSimpleDelimiters("=>");
+		opExpander.addSimpleDelimiters("//");
+		opExpander.addSimpleDelimiters(".+.");
+		opExpander.addSimpleDelimiters(".*.");
+		opExpander.addSimpleDelimiters("+");
+		opExpander.addSimpleDelimiters("-");
+		opExpander.addSimpleDelimiters("*");
+		opExpander.addSimpleDelimiters("/");
+		opExpander.compile();
+
+		/* Initialize literal IDs */
+		nextLiteral = 1;
+
+		/* Initial mode settings. */
+		debugMode = true;
+		postfixMode = false;
+		prefixMode = false;
+		stepEval = false;
+
+		/* Create components. */
+		shunt = new Shunter();
+		parsr = new Parser();
+
+		/* These components keep a back-reference to the engine. */
+		streamEng = new StreamEngine(this);
+		tokenzer = new Tokenizer(this);
+		eval = new Evaluator(this);
+	}
+
+	/** Sort both defn lists into their natural (priority) order. */
+	public void sortDefns() {
+		lineDefns.sort(null);
+		tokenDefns.sort(null);
+		defnsSorted = true;
+	}
+
+	/**
+	 * Register a defn that's applied to whole lines.
+	 *
+	 * @param dfn
+	 *            The defn to register.
+	 */
+	public void addLineDefine(final Define dfn) {
+		/* A fresh defn invalidates the sorted order. */
+		defnsSorted = false;
+
+		lineDefns.add(dfn);
+	}
+
+	/**
+	 * Register a defn that's applied to individual tokens.
+	 *
+	 * @param dfn
+	 *            The defn to register.
+	 */
+	public void addTokenDefine(final Define dfn) {
+		/* A fresh defn invalidates the sorted order. */
+		defnsSorted = false;
+
+		tokenDefns.add(dfn);
+	}
+
+	/**
+	 * Flip the state of debug mode.
+	 *
+	 * @return The state of debug mode after toggling.
+	 */
+	public boolean toggleDebug() {
+		return debugMode = !debugMode;
+	}
+
+	/**
+	 * Flip the state of postfix mode.
+	 *
+	 * @return The state of postfix mode after toggling.
+	 */
+	public boolean togglePostfix() {
+		return postfixMode = !postfixMode;
+	}
+
+	/**
+	 * Flip the state of prefix mode.
+	 *
+	 * @return The state of prefix mode after toggling.
+	 */
+	public boolean togglePrefix() {
+		return prefixMode = !prefixMode;
+	}
+
+	/**
+	 * Flip the state of step-eval mode.
+	 *
+	 * @return The state of step-eval mode after toggling.
+	 */
+	public boolean toggleStepEval() {
+		return stepEval = !stepEval;
+	}
+
+ /* Matches double-angle bracketed strings. */
+ private final Pattern nonExpandPattern =
+ Pattern.compile("<<([^\\>]*(?:\\>(?:[^\\>])*)*)>>");
+
+ /**
+ * Run a command to completion.
+ *
+ * @param command
+ * The command to run
+ *
+ * @return Whether or not the command ran successfully
+ */
+	public boolean runCommand(final String command) {
+		/* Preprocess the command into tokens */
+		/* @NOTE
+		 * Instead of strings, this should maybe use a RawToken
+		 * class or something.
+		 */
+		final IList<String> preprocessedTokens = preprocessCommand(command);
+
+		if (preprocessedTokens == null) {
+			return false;
+		}
+
+		/* Lex the string tokens into token-tokens */
+		final IList<Token> lexedTokens = lexTokens(preprocessedTokens);
+
+		if (lexedTokens == null) {
+			return false;
+		}
+
+		/* Parse the tokens into an AST forest */
+		/*
+		 * NOTE(review): this calls the static Parser.parseTokens even
+		 * though the engine holds a parsr instance field — confirm
+		 * whether the field is still needed.
+		 */
+		final IList<ITree<Node>> astForest = new FunctionalList<>();
+		final boolean succ = Parser.parseTokens(lexedTokens, astForest);
+
+		if (!succ) {
+			return false;
+		}
+
+		/* Evaluate the AST forest */
+		evaluateForest(astForest);
+		return true;
+	}
+
+	/*
+	 * Lex string tokens into token-tokens.
+	 *
+	 * Applies token defines, lexes each token, preshunts marked groups,
+	 * shunts (or, in prefix mode, reverses) the stream, then expands
+	 * token groups. Returns null if any stage fails.
+	 */
+	private IList<Token> lexTokens(final IList<String> preprocessedTokens) {
+		final IList<Token> lexedTokens = new FunctionalList<>();
+
+		for (final String token : preprocessedTokens) {
+			String newTok = token;
+
+			/* Apply token defns */
+			for (final Define dfn : tokenDefns.toIterable()) {
+				/* @NOTE
+				 * What happens with a define that produces
+				 * multiple tokens from one token?
+				 */
+				newTok = dfn.apply(newTok);
+			}
+
+			/*
+			 * Lex the token.
+			 *
+			 * FIX: lex the define-processed token (newTok), not the
+			 * raw token; previously the result of applying token
+			 * defines was silently discarded.
+			 */
+			final Token tk = tokenzer.lexToken(newTok, stringLiterals);
+
+			if (debugMode) {
+				LOG.finer(String.format("lexed token: %s\n", tk));
+			}
+
+			if (tk == null) {
+				/* Ignore blank tokens */
+				continue;
+			} else if (tk == Token.NIL_TOKEN) {
+				/* Fail on bad tokens */
+				return null;
+			} else {
+				lexedTokens.add(tk);
+			}
+		}
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after tokenization: %s\n", lexedTokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Preshunt preshunt-marked groups of tokens */
+		/*
+		 * NOTE(review): in postfix mode, shuntedTokens stays bound to
+		 * lexedTokens and the preshunt removal below is discarded —
+		 * confirm this is intentional.
+		 */
+		IList<Token> shuntedTokens = lexedTokens;
+		final IList<Token> preparedTokens = new FunctionalList<>();
+
+		boolean succ = removePreshuntTokens(lexedTokens, preparedTokens);
+
+		if (!succ) {
+			return null;
+		}
+
+		if (debugMode && !postfixMode) {
+			String msg = String.format("\tCommand after pre-shunter removal: %s\n",
+					preparedTokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Only shunt if we're not in a special mode. */
+		if (!postfixMode && !prefixMode) {
+			/* Shunt the tokens */
+			shuntedTokens = new FunctionalList<>();
+			succ = shunt.shuntTokens(preparedTokens, shuntedTokens);
+
+			if (!succ) {
+				return null;
+			}
+		} else if (prefixMode) {
+			/* Reverse directional tokens */
+			/*
+			 * @NOTE
+			 * Merge these two operations into one iteration
+			 * over the list?
+			 */
+			preparedTokens.reverse();
+			shuntedTokens = preparedTokens.map(this::reverseToken);
+		}
+
+		if (debugMode && !postfixMode) {
+			String msg = String.format("\tCommand after shunting: %s\n", shuntedTokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Expand token groups */
+		final IList<Token> readyTokens = shuntedTokens.flatMap(tk -> {
+			if (tk.type == Token.Type.TOKGROUP ||
+					tk.type == Token.Type.TAGOP ||
+					tk.type == Token.Type.TAGOPR ) {
+				LOG.finer(String.format("Expanding token group to: %s\n", tk.tokenValues.toString()));
+				return tk.tokenValues;
+			} else {
+				return new FunctionalList<>(tk);
+			}
+		});
+
+		if (debugMode && !postfixMode) {
+			String msg = String.format("\tCommand after re-preshunting: %s\n",
+					readyTokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		return readyTokens;
+	}
+
+ /*
+ * Reverse orientation-sensitive tokens.
+ *
+ * These are things like (, {, and [
+ */
+ private Token reverseToken(final Token tk) {
+ switch (tk.type) {
+ case OBRACE:
+ return new Token(CBRACE, tk.intValue);
+ case OPAREN:
+ return new Token(CPAREN, tk.intValue);
+ case OBRACKET:
+ return new Token(CBRACKET, tk.intValue);
+ case CBRACE:
+ return new Token(OBRACE, tk.intValue);
+ case CPAREN:
+ return new Token(OPAREN, tk.intValue);
+ case CBRACKET:
+ return new Token(OBRACKET, tk.intValue);
+ default:
+ return tk;
+ }
+ }
+
+	/*
+	 * Preprocess a command into a list of string tokens.
+	 *
+	 * Runs stream commands, applies line defines, pulls string literals
+	 * and non-expanding (<<...>>) tokens out of the way, expands tokens
+	 * around operators, then reinserts the non-expanders.
+	 *
+	 * Returns null if stream processing fails.
+	 */
+	private IList<String> preprocessCommand(final String command) {
+		/* Sort the defines if they aren't sorted */
+		if (!defnsSorted) {
+			sortDefns();
+		}
+
+		/* Run the tokens through the stream engine */
+		final IList<String> streamToks = new FunctionalList<>();
+		final boolean succ = streamEng.doStreams(command.split(" "),
+				streamToks);
+
+		if (!succ) {
+			return null;
+		}
+
+		String newComm = ListUtils.collapseTokens(streamToks, " ");
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after stream commands: %s\n", newComm);
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Apply line defns */
+		for (final Define dfn : lineDefns.toIterable()) {
+			newComm = dfn.apply(newComm);
+		}
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after line defines: %s\n", newComm);
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Remove string literals. */
+		final List<String> destringedParts = TokenUtils.removeDQuotedStrings(newComm);
+		final StringBuffer destringedCommand = new StringBuffer();
+
+		for (final String part : destringedParts) {
+			/* Handle string literals */
+			if (part.startsWith("\"") && part.endsWith("\"")) {
+				/* Get the actual string. */
+				final String litName = "stringLiteral" + nextLiteral;
+				final String litVal = part.substring(1, part.length() - 1);
+
+				/*
+				 * Insert the string with its escape sequences
+				 * interpreted.
+				 */
+				final String descVal = TokenUtils.descapeString(litVal);
+				stringLiterals.put(litName, descVal);
+
+				if (debugMode)
+					LOG.finer(String.format("Replaced string literal '%s' with literal no. %d",
+							descVal, nextLiteral));
+
+				nextLiteral += 1;
+
+				/* Place a ref. to the string in the command */
+				destringedCommand.append(" " + litName + " ");
+			} else {
+				destringedCommand.append(part);
+			}
+		}
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after destringing: %s\n", destringedCommand);
+			LOG.fine(msg);
+			System.out.print(msg);
+
+			/* Print the string table if it exists. */
+			if (stringLiterals.size() > 0) {
+				System.out.println("\tString literals in table");
+
+				stringLiterals.forEach((key, val) -> {
+					System.out.printf("\t\tName: (%s)\tValue: (%s)\n", key, val);
+				});
+			}
+		}
+
+		/* Split the command into tokens */
+		final String strang = destringedCommand.toString();
+		IList<String> tokens = FunctionalStringTokenizer.fromString(strang).toList();
+
+		/*
+		 * Temporarily remove non-expanding tokens, replacing each with
+		 * a "nonExpandToken" placeholder name.
+		 */
+		final IMap<String, String> nonExpandedTokens = new FunctionalMap<>();
+		tokens = tokens.map(tk -> {
+			final Matcher nonExpandMatcher = nonExpandPattern.matcher(tk);
+
+			if (nonExpandMatcher.matches()) {
+				final String tkName = "nonExpandToken" + nextLiteral++;
+				nonExpandedTokens.put(tkName, nonExpandMatcher.group(1));
+
+				LOG.finer(String.format("Pulled non-expander '%s' to '%s'", nonExpandMatcher.group(1),
+						tkName));
+				return tkName;
+			}
+
+			return tk;
+		});
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after removal of non-expanders: %s\n",
+					tokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Expand tokens */
+		IList<String> fullyExpandedTokens = tokens.flatMap(opExpander::split);
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after token expansion: %s\n",
+					fullyExpandedTokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		/* Reinsert non-expanded tokens */
+		/*
+		 * NOTE(review): assumes no ordinary token starts with
+		 * "nonExpandToken"; such a token would be looked up here and
+		 * replaced with the map's result — confirm this can't occur.
+		 */
+		fullyExpandedTokens = fullyExpandedTokens.map(tk -> {
+			if (tk.startsWith("nonExpandToken")) return nonExpandedTokens.get(tk);
+
+			return tk;
+		});
+
+		if (debugMode) {
+			String msg = String.format("\tCommand after non-expander reinsertion: %s\n",
+					fullyExpandedTokens.toString());
+			LOG.fine(msg);
+			System.out.print(msg);
+		}
+
+		return fullyExpandedTokens;
+	}
+
+	/*
+	 * Evaluate a forest of AST nodes, printing results/debug output to
+	 * System.out. Note that step-by-step evaluation only happens when
+	 * debug mode is ALSO enabled.
+	 */
+	private void evaluateForest(final IList<ITree<Node>> astForest) {
+		/* 1-based index of the tree being evaluated, for debug output. */
+		int treeNo = 1;
+
+		for (final ITree<Node> ast : astForest) {
+			if (debugMode) {
+				System.out.printf("\t\tTree %d in forest:\n%s\n", treeNo, ast.toString());
+			}
+
+			if (debugMode && stepEval) {
+				/*
+				 * @NOTE
+				 * This is broken until stepwise top-down
+				 * tree transforms are fixed.
+				 */
+				int step = 1;
+
+				/* Evaluate it step by step */
+				for (final Iterator<ITree<Node>> itr = eval.stepDebug(ast); itr.hasNext();) {
+					final ITree<Node> nodeStep = itr.next();
+
+					System.out.printf("\t\tStep %d: Node is %s", step, nodeStep);
+
+					/* Don't evaluate null steps */
+					if (nodeStep == null) {
+						System.out.println();
+
+						step += 1;
+						continue;
+					}
+
+					/* Print out details for results */
+					if (nodeStep.getHead().type == Node.Type.RESULT) {
+						final EvaluatorResult res = nodeStep.getHead().resultVal;
+
+						System.out.printf(" (result is %s", res);
+
+						if (res.type == EvaluatorResult.Type.DICE) {
+							System.out.printf(" (sample roll %s)", res.diceVal.value());
+						}
+
+						if (res.origVal != null) {
+							System.out.printf(" (original tree is %s)", res.origVal);
+						}
+
+						System.out.printf(")");
+					}
+
+					/* Advance a step */
+					System.out.println();
+					step += 1;
+				}
+			} else {
+				/* Evaluate it normally */
+				final EvaluatorResult res = eval.evaluate(ast);
+
+				if (debugMode) {
+					System.out.printf("\t\tEvaluates to %s", res);
+
+					if (res.type == EvaluatorResult.Type.DICE) {
+						System.out.println("\t\t (sample roll " + res.diceVal.value() + ")");
+					}
+				}
+			}
+
+			System.out.println();
+
+			treeNo += 1;
+		}
+	}
+
+	/*
+	 * Preshunt preshunt-marked groups of tokens.
+	 *
+	 * Tokens between OBRACE/CBRACE markers with intValue == 2
+	 * (presumably the double-brace marker — confirm against Tokenizer)
+	 * are shunted immediately and collapsed into a single TOKGROUP
+	 * token; groups may nest. All other tokens are copied into
+	 * preparedTokens as-is.
+	 *
+	 * Returns false (after printing an error) on unbalanced markers or
+	 * a failed shunt.
+	 */
+	private boolean removePreshuntTokens(final IList<Token> lexedTokens,
+			final IList<Token> preparedTokens) {
+		/* Current nesting level of tokens. */
+		int curBraceCount = 0;
+
+		/* Data storage. */
+		final Deque<IList<Token>> bracedTokens = new LinkedList<>();
+		IList<Token> curBracedTokens = new FunctionalList<>();
+
+		for (final Token tk : lexedTokens) {
+			if (tk.type == Token.Type.OBRACE && tk.intValue == 2) {
+				/* Open a preshunt group. */
+				curBraceCount += 1;
+
+				if (curBraceCount != 1) {
+					/*
+					 * Push the old group onto the group
+					 * stack.
+					 */
+					bracedTokens.push(curBracedTokens);
+				}
+
+				curBracedTokens = new FunctionalList<>();
+			} else if (tk.type == Token.Type.CBRACE && tk.intValue == 2) {
+				/* Close a preshunt group. */
+				if (curBraceCount == 0) {
+					/*
+					 * Error if there couldn't have been an
+					 * opening.
+					 */
+					Errors.inst.printError(EK_ENG_NOOPENING);
+					return false;
+				}
+
+				curBraceCount -= 1;
+
+				final IList<Token> preshuntTokens = new FunctionalList<>();
+
+				/* Shunt preshunt group. */
+				final boolean success = shunt.shuntTokens(curBracedTokens, preshuntTokens);
+
+				if (debugMode) {
+					System.out.println("\t\tPreshunted " + curBracedTokens + " into "
+							+ preshuntTokens);
+				}
+
+				if (!success) {
+					return false;
+				}
+
+				if (curBraceCount >= 1) {
+					/*
+					 * Add the preshunt group to the
+					 * previous group.
+					 */
+					curBracedTokens = bracedTokens.pop();
+
+					curBracedTokens.add(new Token(Token.Type.TOKGROUP, preshuntTokens));
+				} else {
+					/*
+					 * Add the preshunt group to the token
+					 * stream.
+					 */
+					preparedTokens.add(new Token(Token.Type.TOKGROUP, preshuntTokens));
+				}
+			} else {
+				/*
+				 * Add the token to the active preshunt group,
+				 * if there is one.
+				 */
+				if (curBraceCount >= 1) {
+					curBracedTokens.add(tk);
+				} else {
+					preparedTokens.add(tk);
+				}
+			}
+		}
+
+		if (curBraceCount > 0) {
+			/* There was an unclosed group. */
+			Errors.inst.printError(EK_ENG_NOCLOSING);
+			return false;
+		}
+
+		return true;
+	}
+
+	/*
+	 * Get a string literal from the int-keyed string literal table.
+	 * (Note: stringLits is keyed by int; the separate stringLiterals
+	 * table used during preprocessing is keyed by String.)
+	 */
+	String getStringLiteral(final int key) {
+		return stringLits.get(key);
+	}
+
+	/* Add a string literal to the int-keyed string literal table. */
+	/* @NOTE
+	 * The string literal table should be abstracted into some kind of
+	 * auto-numbered map thing.
+	 */
+	void addStringLiteral(final int key, final String val) {
+		stringLits.put(key, val);
+	}
+}
diff --git a/base/src/bjc/dicelang/Errors.java b/base/src/bjc/dicelang/Errors.java
new file mode 100644
index 0000000..255db75
--- /dev/null
+++ b/base/src/bjc/dicelang/Errors.java
@@ -0,0 +1,523 @@
+package bjc.dicelang;
+
+/**
+ * Repository for error messages.
+ *
+ * @TODO 10/08/17 Ben Culkin :ErrorRefactor
+ * This way of handling error messages is not easy to deal with. Something
+ * else needs to be done, but I'm not sure what at the moment.
+ *
+ * @author EVE
+ *
+ */
+public class Errors {
+ /**
+ * The types of error message.
+ *
+ * @author EVE
+ *
+ */
+ public static enum ErrorKey {
+ /* Define Errors */
+ /**
+ * Incorrect define guard syntax
+ */
+ EK_DFN_PREDSYN,
+ /**
+ * Incorrect define search syntax
+ */
+ EK_DFN_SRCSYN,
+ /**
+ * Recursive define recursed too many times
+ */
+ EK_DFN_RECUR,
+
+ /* Console Errors */
+ /**
+ * Unknown console pragma
+ */
+ EK_CONS_INVPRAG,
+ /**
+ * Improperly formatted define
+ */
+ EK_CONS_INVDEFINE,
+
+ /* Language Engine Errors */
+ /**
+ * Found closing double-brace w/out opening double-brace
+ */
+ EK_ENG_NOOPENING,
+ /**
+ * Reached end of command w/out balanced double-braces
+ */
+ EK_ENG_NOCLOSING,
+
+ /* Tokenizer Errors */
+ /**
+ * Found an unexpected grouping token
+ */
+ EK_TOK_UNGROUP,
+ /**
+ * Invalid base for a flexadecimal number
+ */
+ EK_TOK_INVBASE,
+ /**
+ * Invalid flexadecimal number in a given base
+ */
+ EK_TOK_INVFLEX,
+
+ /* Evaluator Errors */
+ /**
+ * Unknown node type
+ */
+ EK_EVAL_INVNODE,
+ /**
+ * Incorrect # of args to binary operator
+ */
+ EK_EVAL_INVBIN,
+ /**
+ * Incorrect # of args to unary operator
+ */
+ EK_EVAL_INVUNARY,
+ /**
+ * Unknown binary operator
+ */
+ EK_EVAL_UNBIN,
+ /**
+ * Unknown unary operator
+ */
+ EK_EVAL_UNUNARY,
+ /**
+ * Math on strings doesn't work
+ */
+ EK_EVAL_STRINGMATH,
+ /**
+ * Attempted divide by zero
+ */
+ EK_EVAL_DIVZERO,
+ /**
+ * Attempted to divide dice
+ */
+ EK_EVAL_DIVDICE,
+ /**
+ * Unknown math operator
+ */
+ EK_EVAL_UNMATH,
+ /**
+ * Unknown token reference
+ */
+ EK_EVAL_UNTOK,
+ /**
+ * Unknown dice operator
+ */
+ EK_EVAL_UNDICE,
+ /**
+ * Incorrect type to dice group operator
+ */
+ EK_EVAL_INVDGROUP,
+ /**
+ * Incorrect type to dice creation operator
+ */
+ EK_EVAL_INVDCREATE,
+ /**
+ * Incorrect type to other dice operator
+ */
+ EK_EVAL_INVDICE,
+ /**
+ * Mismatched types to math operator
+ */
+ EK_EVAL_MISMATH,
+ /**
+ * Incorrect type to string operator
+ */
+ EK_EVAL_INVSTRING,
+ /**
+ * Unknown string operator
+ */
+ EK_EVAL_UNSTRING,
+
+ /* Parser Error */
+ /**
+ * Group closing where there couldn't be an opener
+ */
+ EK_PARSE_NOCLOSE,
+ /**
+ * Group closing without group opener
+ */
+ EK_PARSE_UNCLOSE,
+ /**
+ * Incorrect # of arguments to binary operator
+ */
+ EK_PARSE_BINARY,
+ /**
+ * Not enough operands to binary operator
+ */
+ EK_PARSE_UNOPERAND,
+ /**
+ * Unrecognized token type
+ */
+ EK_PARSE_INVTOKEN,
+
+ /* Shunter Error */
+ /**
+ * Unary operator expected a operand, but got an operator
+ */
+ EK_SHUNT_NOTADV,
+ /**
+ * Unary operator expected an operator, but got an operand
+ */
+ EK_SHUNT_NOTADJ,
+ /**
+ * Unary operator expected an operator, but didn't find one
+ */
+ EK_SHUNT_NOOP,
+ /**
+ * Asked for opening grouping operator, but couldn't find one
+ */
+ EK_SHUNT_NOGROUP,
+ /**
+ * No group for group seperator to attach to
+ */
+ EK_SHUNT_INVSEP,
+ /**
+ * Attempted to chain non-associative operator
+ */
+ EK_SHUNT_NOTASSOC,
+
+ /* Stream Errors */
+ /**
+ * Attempted to switch to a non-existant stream
+ */
+ EK_STRM_NONEX,
+ /**
+ * Can't delete the last stream
+ */
+ EK_STRM_LAST,
+ /**
+ * Unknown stream command
+ */
+ EK_STRM_INVCOM,
+
+ /* SCL Errors */
+ /**
+ * Unknown SCL token
+ */
+ EK_SCL_INVTOKEN,
+ /**
+ * Mismatched quote in SCL command
+ */
+ EK_SCL_MMQUOTE,
+ /**
+ * Stack underflow in SCL command
+ */
+ EK_SCL_SUNDERFLOW,
+ /**
+ * Unknown word in SCL command
+ */
+ EK_SCL_UNWORD,
+ /**
+ * Invalid argument to SCL command
+ */
+ EK_SCL_INVARG,
+
+ /* CLI Argument Errors */
+ /**
+ * Unknown CLI argument
+ */
+ EK_CLI_UNARG,
+ /**
+ * Missing sub-argument to argument
+ */
+ EK_CLI_MISARG,
+ /**
+ * Invalid define type
+ */
+ EK_CLI_INVDFNTYPE,
+
+ /* Miscellaneous errors */
+ /**
+ * Unknown I/O problem
+ */
+ EK_MISC_IOEX,
+ /**
+ * File not found
+ */
+ EK_MISC_NOFILE,
+
+		/* Dice errors. */
+		/**
+		 * Received the wrong sort of expression to a die.
+		 */
+		EK_DICE_INVTYPE,
+ }
+
+ /**
+ * The mode for the type of error messages to print out.
+ *
+ * @author EVE
+ *
+ */
+ public static enum ErrorMode {
+ /**
+ * Output error messages for wizards.
+ */
+ WIZARD,
+ /**
+ * Output error messages for developers.
+ */
+ DEV
+ }
+
+ private ErrorMode mode;
+
+ /**
+ * Print an error.
+ *
+ * @param key
+ * The key of the error.
+ *
+ * @param args
+ * The arguments for the error.
+ */
+ public void printError(final ErrorKey key, final String... args) {
+ switch (mode) {
+ case WIZARD:
+ if (key == ErrorKey.EK_MISC_NOFILE) {
+ System.out.println("\t? 404");
+ } else {
+ System.out.println("\t? " + key.ordinal());
+ }
+
+ break;
+
+ case DEV:
+ devError(key, args);
+ break;
+
+ default:
+ System.out.println("\tERROR ERROR: Unknown error mode " + mode);
+ }
+ }
+
+ private static void devError(final ErrorKey key, final String[] args) {
+ switch (key) {
+ case EK_DFN_PREDSYN:
+ System.out.printf("\tERROR: Incorrect define guard syntax %s\n", args[0]);
+ break;
+
+ case EK_DFN_SRCSYN:
+ System.out.printf("\tERROR: Incorrect define match syntax %s\n", args[0]);
+ break;
+
+ case EK_DFN_RECUR:
+ System.out.printf(
+ "\tERROR: Recursive define didn't converge after %s iterations."
+ + " Original string was %s, last iteration was %s\n",
+ args[0], args[1], args[2]);
+ break;
+
+ case EK_CONS_INVPRAG:
+ System.out.printf("\tERROR: Unknown pragma %s\n", args[0]);
+ break;
+
+ case EK_CONS_INVDEFINE:
+ System.out.printf("\tERROR: Improperly formatted define %s\n", args[0]);
+ break;
+
+ case EK_ENG_NOOPENING:
+ System.out.printf("\tERROR: Encountered closing doublebrace without"
+ + " matching opening doublebrace\n");
+ break;
+
+ case EK_ENG_NOCLOSING:
+ System.out.printf("\tERROR: Reached end of string before closing doublebrace was found\n");
+ break;
+
+ case EK_TOK_UNGROUP:
+ System.out.printf("\tERROR: Unrecognized grouping token %s\n", args[0]);
+ break;
+
+ case EK_TOK_INVBASE:
+ System.out.printf("\tERROR: Invalid flexadecimal base %s\n", args[0]);
+ break;
+
+ case EK_TOK_INVFLEX:
+ System.out.printf("\tERROR: Invalid flexadecimal number %s in base %s\n", args[0],
+ args[1]);
+ break;
+
+ case EK_EVAL_INVNODE:
+ System.out.printf("\tERROR: Unknown node in evaluator: %s\n", args[0]);
+ break;
+
+ case EK_EVAL_INVBIN:
+ System.out.printf("\tERROR: Binary operators take 2 operands, not %s\n"
+ + "\tProblem node is %s\n", args[0], args[1]);
+ break;
+
+ case EK_EVAL_UNBIN:
+ System.out.printf("\tERROR: Unknown binary operator %s\n", args[0]);
+ break;
+
+ case EK_EVAL_STRINGMATH:
+ System.out.printf("\tERROR: Math operators don't work on strings\n");
+ break;
+
+ case EK_EVAL_DIVZERO:
+ System.out.printf("\tERROR: Attempted divide by zero\n");
+ break;
+
+ case EK_EVAL_DIVDICE:
+ System.out.printf("\tERROR: Dice cannot be divided\n");
+ break;
+
+ case EK_EVAL_UNMATH:
+ System.out.printf("\tERROR: Unknown math binary operator: %s\n", args[0]);
+ break;
+
+ case EK_EVAL_UNTOK:
+ System.out.printf("\tERROR: Unknown token ref %s\n", args[0]);
+ break;
+
+ case EK_EVAL_UNDICE:
+ System.out.printf("\tERROR: Unknown dice operator %s\n", args[0]);
+ break;
+
+ case EK_EVAL_INVDCREATE:
+ System.out.printf("\tERROR: Dice creation operator expects integers," + " not %s\n",
+ args[0]);
+ break;
+
+ case EK_EVAL_INVDGROUP:
+ System.out.printf("\tERROR: Dice group operator expects scalar dice or integers," +
+ " not %s\n",
+ args[0]);
+ break;
+
+ case EK_EVAL_INVDICE:
+ System.out.printf("\tERROR: Dice operators expect scalar dice, not %s\n", args[0]);
+ break;
+
+ case EK_EVAL_MISMATH:
+ System.out.printf("\tERROR: Math operators expect two operands of the same type\n");
+ break;
+
+ case EK_EVAL_INVSTRING:
+ System.out.printf("\tERROR: Incorrect type %s to string operator\n", args[0]);
+ break;
+
+ case EK_EVAL_UNSTRING:
+ System.out.printf("\tERROR: Unknown string operator %s\n", args[0]);
+ break;
+
+ case EK_PARSE_NOCLOSE:
+ System.out.printf("\tERROR: Group closing with no possible group opener\n");
+ break;
+
+ case EK_PARSE_UNCLOSE:
+ System.out.printf("\tERROR: Found group closer without opener: (closing was %s"
+ + ", expected %s)\n", args[0], args[1]);
+ break;
+
+ case EK_PARSE_BINARY:
+ System.out.printf("\tERROR: Expected at least two operands\n");
+ break;
+
+ case EK_PARSE_UNOPERAND:
+ System.out.printf("\tERROR: Operator %s expected more operands than provided\n", args[0]);
+ break;
+
+ case EK_PARSE_INVTOKEN:
+ System.out.printf("\tERROR: Unrecognized token type in parsing: %s\n", args[0]);
+ break;
+
+ case EK_SHUNT_NOTADV:
+ System.out.printf("\tERROR: Unary operator %s is an adjective, not an adverb. It can't be"
+ + " applied to the operator %s\n", args[0], args[1]);
+ break;
+
+		case EK_SHUNT_NOTADJ:
+			/*
+			 * FIX: this message previously duplicated the
+			 * EK_SHUNT_NOTADV text; per the ErrorKey docs, NOTADJ
+			 * means an adverb received an operand.
+			 */
+			System.out.printf("\tERROR: Unary operator %s is an adverb, not an adjective. It can't be"
+					+ " applied to the operand %s\n", args[0], args[1]);
+			break;
+
+ case EK_SHUNT_NOOP:
+ System.out.printf("\tERROR: Unary operator %s is an adverb, but there is no operator"
+ + " to apply it to\n", args[0]);
+ break;
+
+ case EK_SHUNT_NOGROUP:
+ System.out.printf("\tERROR: Couldn't find matching grouping %s (expected %s)\n", args[0],
+ args[1]);
+ break;
+
+ case EK_SHUNT_NOTASSOC:
+ System.out.printf("\tERROR: Attempted to chain non-associative operator %s\n", args[0]);
+ break;
+
+ case EK_SHUNT_INVSEP:
+ System.out.printf("\tERROR: Couldn't find grouper for group seperator to attach to\n");
+ break;
+
+ case EK_STRM_NONEX:
+ System.out.printf("\tERROR: Attempted to switch to non-existent stream\n");
+ break;
+
+ case EK_STRM_LAST:
+ System.out.printf("\tERROR: Cannot delete last stream\n");
+ break;
+
+ case EK_STRM_INVCOM:
+ System.out.printf("\tERROR: Unknown stream control command %s\n", args[0]);
+ break;
+
+ case EK_SCL_INVTOKEN:
+ System.out.printf("\tERROR: Unknown SCL token %s\n", args[0]);
+ break;
+
+ case EK_SCL_MMQUOTE:
+ System.out.printf("\tERROR: Mismatched delimiter in SCL command\n");
+ break;
+
+ case EK_SCL_SUNDERFLOW:
+ System.out.printf("\tERROR: Not enough items in stack for word %s\n", args[0]);
+ break;
+
+ case EK_SCL_UNWORD:
+ System.out.printf("\tERROR: Unknown word %s\n", args[0]);
+ break;
+
+ case EK_CLI_UNARG:
+ System.out.printf("\tERROR: Unknown argument %s\n", args[0]);
+ break;
+
+ case EK_CLI_MISARG:
+ System.out.printf("\tERROR: Missing subargument to command %s", args[0]);
+ break;
+
+ case EK_CLI_INVDFNTYPE:
+ System.out.printf("\tERROR: Invalid define type %s\n", args[0]);
+ break;
+
+ case EK_MISC_IOEX:
+ System.out.printf("\tERROR: I/O problem with file\n");
+ break;
+
+ case EK_MISC_NOFILE:
+ System.out.printf("\tERROR: No such file %s\n", args[0]);
+ break;
+
+ default:
+ System.out.printf("\tERROR ERROR: Unknown error key %s\n", key);
+ }
+ }
+
+ /**
+ * The instance of the errors.
+ */
+ public final static Errors inst;
+
+ static {
+ inst = new Errors();
+
+ inst.mode = ErrorMode.DEV;
+ }
+}
diff --git a/base/src/bjc/dicelang/Evaluator.java b/base/src/bjc/dicelang/Evaluator.java
new file mode 100644
index 0000000..d817c42
--- /dev/null
+++ b/base/src/bjc/dicelang/Evaluator.java
@@ -0,0 +1,603 @@
+package bjc.dicelang;
+
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.function.Consumer;
+
+import bjc.dicelang.dice.CompoundDie;
+import bjc.dicelang.dice.Die;
+import bjc.dicelang.dice.FudgeDie;
+import bjc.dicelang.dice.MathDie;
+import bjc.dicelang.dice.ScalarDie;
+import bjc.dicelang.dice.SimpleDie;
+import bjc.dicelang.dice.SimpleDieList;
+
+import bjc.utils.data.ITree;
+import bjc.utils.data.SingleIterator;
+import bjc.utils.data.TopDownTransformIterator;
+import bjc.utils.data.TopDownTransformResult;
+import bjc.utils.data.Tree;
+
+import static bjc.dicelang.Errors.ErrorKey.*;
+import static bjc.dicelang.EvaluatorResult.Type.DICE;
+import static bjc.dicelang.EvaluatorResult.Type.FAILURE;
+import static bjc.dicelang.EvaluatorResult.Type.FLOAT;
+import static bjc.dicelang.EvaluatorResult.Type.INT;
+import static bjc.dicelang.EvaluatorResult.Type.STRING;
+
+
+/* @TODO 10/09/17 Ben Culkin :EvaluatorSplit
+ * Type/sanity checking should be moved into a separate stage, not part of
+ * evaluation.
+ */
+/**
+ * Evaluate DiceLang ASTs
+ *
+ * @author EVE
+ *
+ */
+public class Evaluator {
+	/* The steps of type coercion, in increasing order of generality. */
+	private static enum CoerceSteps {
+		INTEGER, DOUBLE;
+	}
+
+	/* The context during iteration. */
+	private static class Context {
+		/* Callback handed each batch of processed nodes (used by stepwise debugging). */
+		public Consumer<Iterator<ITree<Node>>> thunk;
+
+		/* Whether we are evaluating in stepwise-debug mode. */
+		public boolean isDebug;
+
+		public Context() {
+			/* Empty block. */
+		}
+	}
+
+	/* @TODO 10/09/17 Ben Culkin :NodeFAIL
+	 * These methods should be moved to Node.
+	 */
+	/* Create a failing node with no attached cause. */
+	private static Node FAIL() {
+		return new Node(Node.Type.RESULT, new EvaluatorResult(FAILURE));
+	}
+
+	/* Create a failing node, recording the subtree that failed. */
+	private static Node FAIL(final ITree<Node> orig) {
+		return new Node(Node.Type.RESULT, new EvaluatorResult(FAILURE, orig));
+	}
+
+	/* Create a failing node, recording the node that failed. */
+	private static Node FAIL(final Node orig) {
+		return new Node(Node.Type.RESULT, new EvaluatorResult(FAILURE, orig));
+	}
+
+	/* Create a failing node, recording the result that caused the failure. */
+	private static Node FAIL(final EvaluatorResult res) {
+		EvaluatorResult eres = new EvaluatorResult(FAILURE, new Node(Node.Type.RESULT, res));
+		return new Node(Node.Type.RESULT, eres);
+	}
+
+	/* The engine we are connected to; supplies string-literal storage. */
+	private final DiceLangEngine eng;
+
+	/**
+	 * Create a new evaluator.
+	 *
+	 * @param en
+	 *        The engine to resolve references (e.g. string literals) against.
+	 */
+	public Evaluator(final DiceLangEngine en) {
+		eng = en;
+	}
+
+	/**
+	 * Evaluate a AST.
+	 *
+	 * @param comm
+	 *        The AST to evaluate.
+	 *
+	 * @return The result of the tree.
+	 */
+	public EvaluatorResult evaluate(final ITree<Node> comm) {
+		final Context ctx = new Context();
+
+		ctx.isDebug = false;
+		ctx.thunk = itr -> {
+			/*
+			 * Deliberately finish the iterator, but ignore results.
+			 * It's only for stepwise evaluation, but we don't know
+			 * if stepping the iterator has side effects.
+			 */
+			while (itr.hasNext()) {
+				itr.next();
+			}
+		};
+
+		/* Transform the tree bottom-up into RESULT nodes. */
+		final ITree<Node> res = comm.topDownTransform(
+				this::pickEvaluationType,
+				node -> this.evaluateNode(node, ctx));
+
+		return res.getHead().resultVal;
+	}
+
+	/* @NOTE
+	 * This is broken until stepwise top-down transforms are fixed. */
+	/**
+	 * Evaluate an AST step by step, for debugging.
+	 *
+	 * @param comm
+	 *        The AST to evaluate.
+	 *
+	 * @return An iterator yielding the tree after each evaluation step.
+	 */
+	public Iterator<ITree<Node>> stepDebug(final ITree<Node> comm) {
+		final Context ctx = new Context();
+
+		ctx.isDebug = true;
+
+		return new TopDownTransformIterator<>(this::pickEvaluationType, (node, thnk) -> {
+			ctx.thunk = thnk;
+
+			return this.evaluateNode(node, ctx);
+		}, comm);
+	}
+
+	/* Pick the way to evaluate a node. */
+	private TopDownTransformResult pickEvaluationType(final Node nd) {
+		/* COERCE rewrites the tree itself, so it needs a recursive transform. */
+		if (nd.type == Node.Type.UNARYOP && nd.operatorType == Token.Type.COERCE) {
+			return TopDownTransformResult.RTRANSFORM;
+		}
+
+		/* Everything else is evaluated bottom-up. */
+		return TopDownTransformResult.PUSHDOWN;
+	}
+
+	/* Evaluate a single node, dispatching on its type. */
+	private ITree<Node> evaluateNode(final ITree<Node> ast, final Context ctx) {
+		switch (ast.getHead().type) {
+		case UNARYOP:
+			return evaluateUnaryOp(ast, ctx);
+		case BINOP:
+			return evaluateBinaryOp(ast, ctx);
+		case TOKREF:
+			/* Literals and references evaluate to their own value. */
+			return evaluateTokenRef(ast.getHead().tokenVal, ctx);
+		case ROOT:
+			/* The value of a root is the value of its last child. */
+			return ast.getChild(ast.getChildrenCount() - 1);
+		case RESULT:
+			/* Already evaluated; nothing to do. */
+			return ast;
+		default:
+			Errors.inst.printError(EK_EVAL_INVNODE, ast.getHead().type.toString());
+			return new Tree<>(FAIL(ast));
+		}
+	}
+
+	/*
+	 * Evaluate a unary operator.
+	 *
+	 * Handles COERCE (type widening over a group's children) and the
+	 * die-creation operators DICESCALAR/DICEFUDGE.
+	 */
+	private ITree<Node> evaluateUnaryOp(final ITree<Node> ast, final Context ctx) {
+		/* Unary operators only take one operand. */
+		if (ast.getChildrenCount() != 1) {
+			Errors.inst.printError(EK_EVAL_UNUNARY, Integer.toString(ast.getChildrenCount()));
+			return new Tree<>(FAIL(ast));
+		}
+
+		switch (ast.getHead().operatorType) {
+		/*
+		 * @TODO 10/09/17 Ben Culkin :CoerceRefactor :EvaluatorSplit
+		 * Coercing should be moved to its own class, or at the
+		 * very least its own method. When the evaluator splits,
+		 * this node type'll be handled exclusively by the
+		 * type-checker.
+		 *
+		 * Coerce also needs to be able to coerce things to
+		 * dice and ratios (whenever they get added).
+		 */
+		case COERCE:
+			final ITree<Node> toCoerce = ast.getChild(0);
+			final ITree<Node> retVal = new Tree<>(toCoerce.getHead());
+			final Deque<ITree<Node>> children = new LinkedList<>();
+
+			/* The widest type seen so far; everything is coerced up to it. */
+			CoerceSteps curLevel = CoerceSteps.INTEGER;
+
+			for (int i = 0; i < toCoerce.getChildrenCount(); i++) {
+				final ITree<Node> child = toCoerce.getChild(i);
+				ITree<Node> nChild = null;
+
+				/* Tell our thunk we processed a node. */
+				if (ctx.isDebug) {
+					/* Evaluate each step of the child. */
+					final Iterator<ITree<Node>> nd = stepDebug(child);
+
+					/* NOTE(review): the thunk is handed the original
+					 * child, not the stepped tree - confirm intended. */
+					for (; nd.hasNext(); nChild = nd.next()) {
+						ctx.thunk.accept(new SingleIterator<>(child));
+					}
+				} else {
+					/* Evaluate the child. */
+					nChild = new Tree<>(new Node(Node.Type.RESULT, evaluate(child)));
+
+					ctx.thunk.accept(new SingleIterator<>(nChild));
+				}
+
+				if (nChild == null) {
+					Errors.inst.printError(EK_EVAL_INVNODE);
+					return new Tree<>(FAIL(ast));
+				}
+
+				final Node childNode = nChild.getHead();
+				final EvaluatorResult res = childNode.resultVal;
+
+				/* Move up to coercing to a float. */
+				if (res.type == FLOAT) {
+					curLevel = CoerceSteps.DOUBLE;
+				}
+
+				children.add(nChild);
+			}
+
+			for (final ITree<Node> child : children) {
+				final Node nd = child.getHead();
+				final EvaluatorResult res = nd.resultVal;
+
+				switch (res.type) {
+				case INT:
+					/* Coerce ints to doubles if we need to. */
+					if (curLevel == CoerceSteps.DOUBLE) {
+						nd.resultVal = new EvaluatorResult(FLOAT, (double) res.intVal);
+					}
+				default:
+					/* Do nothing */
+					break;
+				}
+
+				retVal.addChild(child);
+			}
+
+			return retVal;
+		case DICESCALAR:
+			final EvaluatorResult opr = ast.getChild(0).getHead().resultVal;
+
+			/* Scalar dice can only be built from integers. */
+			if (opr.type != INT) {
+				Errors.inst.printError(EK_EVAL_INVDCREATE, opr.type.toString());
+				/* BUGFIX: fail instead of building a die from a
+				 * garbage operand after reporting the error. */
+				return new Tree<>(FAIL(ast));
+			}
+
+			final EvaluatorResult sres = new EvaluatorResult(DICE, new ScalarDie(opr.intVal));
+			return new Tree<>(new Node(Node.Type.RESULT, sres));
+		case DICEFUDGE:
+			final EvaluatorResult oprn = ast.getChild(0).getHead().resultVal;
+
+			/* Fudge dice can only be built from integers. */
+			if (oprn.type != INT) {
+				Errors.inst.printError(EK_EVAL_INVDCREATE, oprn.type.toString());
+				return new Tree<>(FAIL(ast));
+			}
+
+			/* BUGFIX: this previously constructed a ScalarDie, so the
+			 * fudge-die operator never produced an actual FudgeDie
+			 * (which was imported but unused). */
+			final EvaluatorResult fres = new EvaluatorResult(DICE, new FudgeDie(oprn.intVal));
+			return new Tree<>(new Node(Node.Type.RESULT, fres));
+		default:
+			Errors.inst.printError(EK_EVAL_INVUNARY, ast.getHead().operatorType.toString());
+			return new Tree<>(FAIL(ast));
+		}
+	}
+
+	/* Evaluate a binary operator, dispatching to the handler for its family. */
+	private static ITree<Node> evaluateBinaryOp(final ITree<Node> ast, final Context ctx) {
+		final Token.Type binOp = ast.getHead().operatorType;
+
+		/* Binary operators always have two children. */
+		if (ast.getChildrenCount() != 2) {
+			Errors.inst.printError(EK_EVAL_INVBIN, Integer.toString(ast.getChildrenCount()),
+					ast.toString());
+
+			return new Tree<>(FAIL(ast));
+		}
+
+		/* Children have already been evaluated to RESULT nodes. */
+		final ITree<Node> left = ast.getChild(0);
+		final ITree<Node> right = ast.getChild(1);
+
+		final EvaluatorResult leftRes = left.getHead().resultVal;
+		final EvaluatorResult rightRes = right.getHead().resultVal;
+
+		switch (binOp) {
+		case ADD:
+		case SUBTRACT:
+		case MULTIPLY:
+		case DIVIDE:
+		case IDIVIDE:
+			return evaluateMathBinary(binOp, leftRes, rightRes, ctx);
+		case DICEGROUP:
+		case DICECONCAT:
+		case DICELIST:
+			return evaluateDiceBinary(binOp, leftRes, rightRes, ctx);
+		case STRCAT:
+		case STRREP:
+			return evaluateStringBinary(binOp, leftRes, rightRes, ctx);
+		default:
+			Errors.inst.printError(EK_EVAL_UNBIN, binOp.toString());
+			return new Tree<>(FAIL(ast));
+		}
+	}
+
+	/*
+	 * Evaluate a binary operator on strings.
+	 *
+	 * STRCAT concatenates two strings; STRREP repeats the left string
+	 * right-intVal times.
+	 */
+	private static ITree<Node> evaluateStringBinary(final Token.Type op,
+			final EvaluatorResult left,
+			final EvaluatorResult right, final Context ctx) {
+		if (left.type != STRING) {
+			Errors.inst.printError(EK_EVAL_INVSTRING, left.type.toString());
+			return new Tree<>(FAIL(left));
+		}
+
+		final String strang = left.stringVal;
+
+		switch (op) {
+		case STRCAT:
+			if (right.type != STRING) {
+				Errors.inst.printError(EK_EVAL_UNSTRING, right.type.toString());
+				return new Tree<>(FAIL(right));
+			}
+
+			final String strung = right.stringVal;
+			final EvaluatorResult cres = new EvaluatorResult(STRING, strang + strung);
+
+			return new Tree<>(new Node(Node.Type.RESULT, cres));
+		case STRREP:
+			if (right.type != INT) {
+				Errors.inst.printError(EK_EVAL_INVSTRING, right.type.toString());
+				return new Tree<>(FAIL(right));
+			}
+
+			/* PERF: build the repetition with a StringBuilder instead of
+			 * repeated string concatenation (which is O(n^2)). */
+			final StringBuilder res = new StringBuilder(strang);
+			final long count = right.intVal;
+
+			/* NOTE(review): counts <= 1 still yield the string once;
+			 * confirm that is the intended semantics. */
+			for (long i = 1; i < count; i++) {
+				res.append(strang);
+			}
+
+			return new Tree<>(new Node(Node.Type.RESULT,
+					new EvaluatorResult(STRING, res.toString())));
+		default:
+			Errors.inst.printError(EK_EVAL_UNSTRING, op.toString());
+			return new Tree<>(FAIL());
+		}
+	}
+
+	/*
+	 * Evaluate dice binary operators.
+	 *
+	 * DICEGROUP builds NdM-style dice from scalar dice and/or ints,
+	 * DICECONCAT chains two scalar dice, DICELIST builds a die list.
+	 */
+	private static ITree<Node> evaluateDiceBinary(final Token.Type op,
+			final EvaluatorResult left,
+			final EvaluatorResult right, final Context ctx) {
+		EvaluatorResult res = null;
+
+		switch (op) {
+		/*
+		 * @TODO 10/09/17 Ben Culkin :DiceSimplify
+		 * Figure out some way to simplify this sort of
+		 * thing.
+		 */
+		case DICEGROUP:
+			if (left.type == DICE && !left.diceVal.isList) {
+				if (right.type == DICE && !right.diceVal.isList) {
+					Die simple = new SimpleDie(
+							left.diceVal.scalar,
+							right.diceVal.scalar);
+
+					res = new EvaluatorResult(DICE, simple);
+				} else if (right.type == INT) {
+					res = new EvaluatorResult(DICE,
+							new SimpleDie(left.diceVal.scalar, right.intVal));
+				} else {
+					Errors.inst.printError(EK_EVAL_INVDGROUP, right.type.toString());
+					return new Tree<>(FAIL(right));
+				}
+			} else if (left.type == INT) {
+				if (right.type == DICE && !right.diceVal.isList) {
+					res = new EvaluatorResult(DICE,
+							new SimpleDie(left.intVal, right.diceVal.scalar));
+				} else if (right.type == INT) {
+					res = new EvaluatorResult(DICE, new SimpleDie(left.intVal, right.intVal));
+				} else {
+					Errors.inst.printError(EK_EVAL_INVDGROUP, right.type.toString());
+					return new Tree<>(FAIL(right));
+				}
+			} else {
+				Errors.inst.printError(EK_EVAL_INVDGROUP, left.type.toString());
+				return new Tree<>(FAIL(left));
+			}
+
+			/* BUGFIX: this break was missing, so a successful DICEGROUP
+			 * fell through into DICECONCAT and was clobbered with a
+			 * spurious EK_EVAL_INVDICE failure. */
+			break;
+
+		case DICECONCAT:
+			if (left.type != DICE || left.diceVal.isList) {
+				Errors.inst.printError(EK_EVAL_INVDICE, left.type.toString());
+				return new Tree<>(FAIL(left));
+			} else if (right.type != DICE || right.diceVal.isList) {
+				Errors.inst.printError(EK_EVAL_INVDICE, right.type.toString());
+				return new Tree<>(FAIL(right));
+			} else {
+				res = new EvaluatorResult(DICE,
+						new CompoundDie(left.diceVal.scalar, right.diceVal.scalar));
+			}
+
+			break;
+
+		case DICELIST:
+			if (left.type != DICE || left.diceVal.isList) {
+				Errors.inst.printError(EK_EVAL_INVDICE, left.type.toString());
+				return new Tree<>(FAIL(left));
+			} else if (right.type != DICE || right.diceVal.isList) {
+				Errors.inst.printError(EK_EVAL_INVDICE, right.type.toString());
+				return new Tree<>(FAIL(right));
+			} else {
+				res = new EvaluatorResult(DICE,
+						new SimpleDieList(left.diceVal.scalar, right.diceVal.scalar));
+			}
+
+			break;
+
+		default:
+			Errors.inst.printError(EK_EVAL_UNDICE, op.toString());
+			return new Tree<>(FAIL());
+		}
+
+		return new Tree<>(new Node(Node.Type.RESULT, res));
+	}
+
+	/*
+	 * Evaluate a binary math operator.
+	 *
+	 * Both operands must already have the same type (INT, FLOAT or
+	 * scalar DICE); mixed-type math is rejected with EK_EVAL_MISMATH.
+	 */
+	private static ITree<Node> evaluateMathBinary(final Token.Type op,
+			final EvaluatorResult left,
+			final EvaluatorResult right, final Context ctx) {
+		if (left.type == STRING || right.type == STRING) {
+			Errors.inst.printError(EK_EVAL_STRINGMATH);
+			return new Tree<>(FAIL());
+		} else if (left.type == FAILURE || right.type == FAILURE) {
+			/* Propagate failures silently. */
+			return new Tree<>(FAIL());
+		} else if (left.type == INT && right.type != INT) {
+			Errors.inst.printError(EK_EVAL_MISMATH);
+			return new Tree<>(FAIL(right));
+		} else if (left.type == FLOAT && right.type != FLOAT) {
+			Errors.inst.printError(EK_EVAL_MISMATH);
+			return new Tree<>(FAIL(right));
+		} else if (left.type == DICE && right.type != DICE) {
+			Errors.inst.printError(EK_EVAL_MISMATH);
+			return new Tree<>(FAIL(right));
+		} else if (right.type == INT && left.type != INT) {
+			Errors.inst.printError(EK_EVAL_MISMATH);
+			return new Tree<>(FAIL(left));
+		} else if (right.type == FLOAT && left.type != FLOAT) {
+			Errors.inst.printError(EK_EVAL_MISMATH);
+			return new Tree<>(FAIL(left));
+		} else if (right.type == DICE && left.type != DICE) {
+			Errors.inst.printError(EK_EVAL_MISMATH);
+			return new Tree<>(FAIL(left));
+		}
+
+		EvaluatorResult res = null;
+
+		switch (op) {
+		case ADD:
+			if (left.type == INT) {
+				res = new EvaluatorResult(INT, left.intVal + right.intVal);
+			} else if (left.type == DICE) {
+				if (left.diceVal.isList) {
+					Errors.inst.printError(EK_EVAL_INVDICE, left.toString());
+					return new Tree<>(FAIL(left));
+				} else if (right.diceVal.isList) {
+					Errors.inst.printError(EK_EVAL_INVDICE, right.toString());
+					return new Tree<>(FAIL(right));
+				}
+
+				res = new EvaluatorResult(DICE, new MathDie(MathDie.MathOp.ADD, left.diceVal.scalar,
+						right.diceVal.scalar));
+			} else {
+				res = new EvaluatorResult(FLOAT, left.floatVal + right.floatVal);
+			}
+
+			break;
+
+		case SUBTRACT:
+			if (left.type == INT) {
+				res = new EvaluatorResult(INT, left.intVal - right.intVal);
+			} else if (left.type == DICE) {
+				if (left.diceVal.isList) {
+					Errors.inst.printError(EK_EVAL_INVDICE, left.toString());
+					return new Tree<>(FAIL(left));
+				} else if (right.diceVal.isList) {
+					Errors.inst.printError(EK_EVAL_INVDICE, right.toString());
+					return new Tree<>(FAIL(right));
+				}
+
+				res = new EvaluatorResult(DICE, new MathDie(MathDie.MathOp.SUBTRACT,
+						left.diceVal.scalar, right.diceVal.scalar));
+			} else {
+				res = new EvaluatorResult(FLOAT, left.floatVal - right.floatVal);
+			}
+
+			break;
+
+		case MULTIPLY:
+			if (left.type == INT) {
+				res = new EvaluatorResult(INT, left.intVal * right.intVal);
+			} else if (left.type == DICE) {
+				if (left.diceVal.isList) {
+					Errors.inst.printError(EK_EVAL_INVDICE, left.toString());
+					return new Tree<>(FAIL(left));
+				} else if (right.diceVal.isList) {
+					Errors.inst.printError(EK_EVAL_INVDICE, right.toString());
+					return new Tree<>(FAIL(right));
+				}
+
+				res = new EvaluatorResult(DICE, new MathDie(MathDie.MathOp.MULTIPLY,
+						left.diceVal.scalar, right.diceVal.scalar));
+			} else {
+				res = new EvaluatorResult(FLOAT, left.floatVal * right.floatVal);
+			}
+
+			break;
+
+		case DIVIDE:
+			if (left.type == INT) {
+				if (right.intVal == 0) {
+					Errors.inst.printError(EK_EVAL_DIVZERO);
+					res = new EvaluatorResult(FAILURE, right);
+				} else {
+					/* BUGFIX: was long/long, which both did integer
+					 * division and resolved to the (Type, long)
+					 * constructor - so the result claimed FLOAT but
+					 * its floatVal was never set. Promote first. */
+					res = new EvaluatorResult(FLOAT, (double) left.intVal / right.intVal);
+				}
+			} else if (left.type == FLOAT) {
+				if (right.floatVal == 0) {
+					Errors.inst.printError(EK_EVAL_DIVZERO);
+					res = new EvaluatorResult(FAILURE, right);
+				} else {
+					res = new EvaluatorResult(FLOAT, left.floatVal / right.floatVal);
+				}
+			} else {
+				Errors.inst.printError(EK_EVAL_DIVDICE);
+				return new Tree<>(FAIL());
+			}
+
+			break;
+
+		case IDIVIDE:
+			if (left.type == INT) {
+				if (right.intVal == 0) {
+					Errors.inst.printError(EK_EVAL_DIVZERO);
+					res = new EvaluatorResult(FAILURE, right);
+				} else {
+					/* BUGFIX: dropped the narrowing (int) cast, which
+					 * silently truncated long quotients. */
+					res = new EvaluatorResult(INT, left.intVal / right.intVal);
+				}
+			} else if (left.type == FLOAT) {
+				if (right.floatVal == 0) {
+					Errors.inst.printError(EK_EVAL_DIVZERO);
+					res = new EvaluatorResult(FAILURE, right);
+				} else {
+					/* BUGFIX: truncate to long, not int, to avoid
+					 * overflow on large quotients. */
+					res = new EvaluatorResult(INT, (long) (left.floatVal / right.floatVal));
+				}
+			} else {
+				Errors.inst.printError(EK_EVAL_DIVDICE);
+				return new Tree<>(FAIL());
+			}
+
+			break;
+
+		default:
+			Errors.inst.printError(EK_EVAL_UNMATH, op.toString());
+			return new Tree<>(FAIL());
+		}
+
+		return new Tree<>(new Node(Node.Type.RESULT, res));
+	}
+
+	/* Evaluate a token reference (a literal) to its value. */
+	private ITree<Node> evaluateTokenRef(final Token tk, final Context ctx) {
+		EvaluatorResult res = null;
+
+		switch (tk.type) {
+		case INT_LIT:
+			res = new EvaluatorResult(INT, tk.intValue);
+			break;
+		case FLOAT_LIT:
+			res = new EvaluatorResult(FLOAT, tk.floatValue);
+			break;
+		case DICE_LIT:
+			res = new EvaluatorResult(DICE, tk.diceValue);
+			break;
+		case STRING_LIT:
+			/* String literals live in the engine's string table, keyed
+			 * by the token's int value. */
+			res = new EvaluatorResult(STRING, eng.getStringLiteral((int) tk.intValue));
+			break;
+		default:
+			Errors.inst.printError(EK_EVAL_UNTOK, tk.type.toString());
+			res = new EvaluatorResult(FAILURE);
+		}
+
+		return new Tree<>(new Node(Node.Type.RESULT, res));
+	}
+}
diff --git a/base/src/bjc/dicelang/EvaluatorResult.java b/base/src/bjc/dicelang/EvaluatorResult.java
new file mode 100644
index 0000000..3ceee9b
--- /dev/null
+++ b/base/src/bjc/dicelang/EvaluatorResult.java
@@ -0,0 +1,220 @@
+package bjc.dicelang;
+
+import bjc.dicelang.dice.Die;
+import bjc.dicelang.dice.DieExpression;
+import bjc.dicelang.dice.DieList;
+import bjc.utils.data.ITree;
+import bjc.utils.data.Tree;
+
+/*
+ * @TODO 10/09/17 Ben Culkin :EvalResultReorg
+ * Again, split it into separate classes based on the type.
+ */
+/**
+ * The result from the evaluator.
+ *
+ * @author EVE
+ *
+ */
+public class EvaluatorResult {
+	/**
+	 * The type of the result.
+	 *
+	 * @author EVE
+	 *
+	 */
+	public static enum Type {
+		/**
+		 * The type of a failure.
+		 */
+		FAILURE,
+		/**
+		 * The type of an integer.
+		 */
+		INT,
+		/**
+		 * The type of a float.
+		 */
+		FLOAT,
+		/**
+		 * The type of a dice.
+		 */
+		DICE,
+		/**
+		 * The type of a string.
+		 */
+		STRING
+	}
+
+	/**
+	 * The type of the result.
+	 */
+	public final EvaluatorResult.Type type;
+
+	// These may or may not have values based
+	// off of the result type
+	/**
+	 * The integer value of the result (valid when type is INT).
+	 */
+	public long intVal;
+	/**
+	 * The float value of the result (valid when type is FLOAT).
+	 */
+	public double floatVal;
+	/**
+	 * The dice value of the result (valid when type is DICE).
+	 */
+	public DieExpression diceVal;
+	/**
+	 * The string value of the result (valid when type is STRING).
+	 */
+	public String stringVal;
+
+	/**
+	 * Original node data (set for FAILURE results that record a cause).
+	 */
+	public ITree<Node> origVal;
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ) {
+		type = typ;
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 *
+	 * @param orig
+	 *        The original value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final ITree<Node> orig) {
+		this(typ);
+
+		origVal = orig;
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 *
+	 * @param orig
+	 *        The original value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final Node orig) {
+		this(typ, new Tree<>(orig));
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param orig
+	 *        The result to record as the original value.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final EvaluatorResult orig) {
+		this(typ, new Node(Node.Type.RESULT, orig));
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param iVal
+	 *        The integer value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final long iVal) {
+		this(typ);
+
+		intVal = iVal;
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param dVal
+	 *        The float value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final double dVal) {
+		this(typ);
+
+		floatVal = dVal;
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param dVal
+	 *        The dice value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final DieExpression dVal) {
+		this(typ);
+
+		diceVal = dVal;
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param dVal
+	 *        The die to wrap as the dice value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final Die dVal) {
+		this(typ);
+
+		diceVal = new DieExpression(dVal);
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param dVal
+	 *        The die list to wrap as the dice value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final DieList dVal) {
+		this(typ);
+
+		diceVal = new DieExpression(dVal);
+	}
+
+	/**
+	 * Create a new result.
+	 *
+	 * @param typ
+	 *        The type of the result.
+	 * @param strang
+	 *        The string value of the result.
+	 */
+	public EvaluatorResult(final EvaluatorResult.Type typ, final String strang) {
+		this(typ);
+
+		stringVal = strang;
+	}
+
+	@Override
+	public String toString() {
+		switch (type) {
+		case INT:
+			return type.toString() + "(" + intVal + ")";
+
+		case FLOAT:
+			return type.toString() + "(" + floatVal + ")";
+
+		case DICE:
+			return type.toString() + "(" + diceVal + ")";
+
+		case STRING:
+			return type.toString() + "(" + stringVal + ")";
+
+		case FAILURE:
+			return type.toString();
+
+		default:
+			return "Unknown result type " + type.toString();
+		}
+	}
+}
diff --git a/base/src/bjc/dicelang/Node.java b/base/src/bjc/dicelang/Node.java
new file mode 100644
index 0000000..4238cfc
--- /dev/null
+++ b/base/src/bjc/dicelang/Node.java
@@ -0,0 +1,105 @@
+package bjc.dicelang;
+
+/*
+ * @TODO 10/09/17 Ben Culkin :NodeReorg
+ * Same thing, different class. Split into subclasses based off of the type
+ * values.
+ */
+/**
+ * Represents a node in the AST.
+ *
+ * @author Ben Culkin
+ */
+public class Node {
+	/** The possible types of a node. */
+	public static enum Type {
+		ROOT, TOKREF, UNARYOP, BINOP, GROUP, OGROUP, RESULT
+	}
+
+	/** The possible kinds of group node. */
+	public static enum GroupType {
+		ARRAY, CODE
+	}
+
+	/** The type of this node. */
+	public final Type type;
+
+	// These can have or not have values based on the node type
+	/** The token this node refers to (TOKREF/OGROUP nodes). */
+	public Token tokenVal;
+	/** The operator this node applies (UNARYOP/BINOP nodes). */
+	public Token.Type operatorType;
+	/** The kind of group this node opens (GROUP nodes). */
+	public GroupType groupType;
+	/** The evaluated result this node holds (RESULT nodes). */
+	public EvaluatorResult resultVal;
+
+	/**
+	 * Create a new node with no attached value.
+	 *
+	 * @param typ
+	 *        The type of the node.
+	 */
+	public Node(final Type typ) {
+		type = typ;
+	}
+
+	/**
+	 * Create a new node referring to a token.
+	 *
+	 * @param typ
+	 *        The type of the node.
+	 * @param tokenVl
+	 *        The token the node refers to.
+	 */
+	public Node(final Type typ, final Token tokenVl) {
+		this(typ);
+
+		tokenVal = tokenVl;
+	}
+
+	/**
+	 * Create a new operator node.
+	 *
+	 * @param typ
+	 *        The type of the node.
+	 * @param opType
+	 *        The operator the node applies.
+	 */
+	public Node(final Type typ, final Token.Type opType) {
+		this(typ);
+
+		operatorType = opType;
+	}
+
+	/**
+	 * Create a new group node.
+	 *
+	 * @param typ
+	 *        The type of the node.
+	 * @param grupType
+	 *        The kind of group the node opens.
+	 */
+	public Node(final Type typ, final GroupType grupType) {
+		this(typ);
+
+		groupType = grupType;
+	}
+
+	/**
+	 * Create a new result node.
+	 *
+	 * @param typ
+	 *        The type of the node.
+	 * @param res
+	 *        The result the node holds.
+	 */
+	public Node(final Type typ, final EvaluatorResult res) {
+		this(typ);
+
+		resultVal = res;
+	}
+
+	@Override
+	public String toString() {
+		switch (type) {
+		case UNARYOP:
+		case BINOP:
+			return "(" + type.name() + " : " + operatorType + ")";
+
+		case OGROUP:
+		case TOKREF:
+			return "(" + type.name() + " : " + tokenVal + ")";
+
+		case GROUP:
+			return "(" + type.name() + " : " + groupType + ")";
+
+		case RESULT:
+			return "(" + type.name() + " : " + resultVal + ")";
+
+		default:
+			return "Unknown node type " + type;
+		}
+	}
+
+	@Override
+	public boolean equals(final Object other) {
+		if (!(other instanceof Node)) {
+			return false;
+		}
+
+		final Node otk = (Node) other;
+
+		if (otk.type != type) {
+			return false;
+		}
+
+		switch (type) {
+		case OGROUP:
+			/* Opening groupers match only on their token. */
+			return tokenVal.equals(otk.tokenVal);
+
+		default:
+			/* All other nodes compare by type alone. */
+			return true;
+		}
+	}
+
+	@Override
+	public int hashCode() {
+		/* BUGFIX: was super.hashCode() (identity hash), which violated
+		 * the equals/hashCode contract - equals() compares by type (and
+		 * by token for OGROUP), so equal nodes could hash differently
+		 * and misbehave in hash-based collections. */
+		if (type == Type.OGROUP) {
+			return 31 * type.hashCode() + (tokenVal == null ? 0 : tokenVal.hashCode());
+		}
+
+		return type.hashCode();
+	}
+}
diff --git a/base/src/bjc/dicelang/Parser.java b/base/src/bjc/dicelang/Parser.java
new file mode 100644
index 0000000..0861e96
--- /dev/null
+++ b/base/src/bjc/dicelang/Parser.java
@@ -0,0 +1,221 @@
+package bjc.dicelang;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_PARSE_BINARY;
+import static bjc.dicelang.Errors.ErrorKey.EK_PARSE_INVTOKEN;
+import static bjc.dicelang.Errors.ErrorKey.EK_PARSE_NOCLOSE;
+import static bjc.dicelang.Errors.ErrorKey.EK_PARSE_UNCLOSE;
+import static bjc.dicelang.Errors.ErrorKey.EK_PARSE_UNOPERAND;
+import static bjc.dicelang.Node.Type.BINOP;
+import static bjc.dicelang.Node.Type.GROUP;
+import static bjc.dicelang.Node.Type.OGROUP;
+import static bjc.dicelang.Node.Type.TOKREF;
+import static bjc.dicelang.Node.Type.UNARYOP;
+import static bjc.dicelang.Token.Type.CBRACE;
+import static bjc.dicelang.Token.Type.CBRACKET;
+
+import java.util.Deque;
+import java.util.LinkedList;
+
+import bjc.utils.data.ITree;
+import bjc.utils.data.Tree;
+import bjc.utils.funcdata.IList;
+
+/**
+ * Parse a series of tree into tokens.
+ *
+ * @author EVE
+ *
+ */
+public class Parser {
+	/** Create a new parser. */
+	public Parser() {
+
+	}
+
+	/**
+	 * Parse a series of tokens into a forest of ASTs.
+	 *
+	 * @param tokens
+	 *        The list of tokens to parse.
+	 *
+	 * @param results
+	 *        The place to put the parsed trees.
+	 *
+	 * @return Whether or not the parse was successful.
+	 */
+	public static boolean parseTokens(final IList<Token> tokens,
+			final IList<ITree<Node>> results) {
+		/* Working stack of partially-built trees. */
+		final Deque<ITree<Node>> working = new LinkedList<>();
+
+		for (final Token tk : tokens) {
+			switch (tk.type) {
+			case OBRACKET:
+			case OBRACE:
+				/* Parse opening delims. */
+				working.push(new Tree<>(new Node(OGROUP, tk)));
+
+				break;
+			case CBRACKET:
+			case CBRACE:
+				/* Parse closing delims. */
+				final boolean sc = parseClosingGrouper(working, tk);
+
+				if (!sc) {
+					return false;
+				}
+
+				break;
+			case MULTIPLY:
+			case DIVIDE:
+			case IDIVIDE:
+			case DICEGROUP:
+			case DICECONCAT:
+			case DICELIST:
+			case STRCAT:
+			case STRREP:
+			case LET:
+			case BIND:
+				/* Parse binary operator. */
+				if (working.size() < 2) {
+					Errors.inst.printError(EK_PARSE_BINARY);
+					return false;
+				}
+
+				handleBinaryNode(working, tk);
+				break;
+			case ADD:
+			case SUBTRACT:
+				/* Handle operators that may be binary or unary. */
+				if (working.size() == 0) {
+					Errors.inst.printError(EK_PARSE_UNOPERAND, tk.toString());
+					return false;
+				} else if (working.size() == 1) {
+					/* One operand available: treat as unary. */
+					final ITree<Node> operand = working.pop();
+					final ITree<Node> opNode = new Tree<>(new Node(UNARYOP, tk.type));
+
+					opNode.addChild(operand);
+
+					working.push(opNode);
+				} else {
+					handleBinaryNode(working, tk);
+				}
+
+				break;
+			case COERCE:
+			case DICESCALAR:
+			case DICEFUDGE:
+				/* Handle unary operators. */
+				if (working.size() == 0) {
+					Errors.inst.printError(EK_PARSE_UNOPERAND, tk.toString());
+					/* BUGFIX: previously fell through after printing
+					 * the error, silently dropping the operator;
+					 * fail the parse like the other error paths. */
+					return false;
+				} else {
+					final ITree<Node> operand = working.pop();
+					final ITree<Node> opNode = new Tree<>(new Node(UNARYOP, tk.type));
+
+					opNode.addChild(operand);
+
+					working.push(opNode);
+				}
+
+				break;
+			case INT_LIT:
+			case FLOAT_LIT:
+			case STRING_LIT:
+			case VREF:
+			case DICE_LIT:
+				/* Handle literals. */
+				working.push(new Tree<>(new Node(TOKREF, tk)));
+				break;
+			default:
+				Errors.inst.printError(EK_PARSE_INVTOKEN, tk.type.toString());
+				return false;
+			}
+		}
+
+		/*
+		 * Collect the remaining nodes as the roots of the trees in the
+		 * AST forest.
+		 */
+		for (final ITree<Node> ast : working) {
+			results.add(ast);
+		}
+
+		return true;
+	}
+
+	/* Handle a binary operator: pop two operands, push the combined tree. */
+	private static void handleBinaryNode(final Deque<ITree<Node>> working, final Token tk) {
+		final ITree<Node> right = working.pop();
+		final ITree<Node> left = working.pop();
+
+		final ITree<Node> opNode = new Tree<>(new Node(BINOP, tk.type));
+
+		opNode.addChild(left);
+		opNode.addChild(right);
+
+		working.push(opNode);
+	}
+
+	/*
+	 * Parse a closing delimiter: gather everything back to the matching
+	 * opener into a single GROUP node.
+	 */
+	private static boolean parseClosingGrouper(final Deque<ITree<Node>> working,
+			final Token tk) {
+		if (working.size() == 0) {
+			Errors.inst.printError(EK_PARSE_NOCLOSE);
+			return false;
+		}
+
+		ITree<Node> groupNode = null;
+
+		switch (tk.type) {
+		case CBRACE:
+			groupNode = new Tree<>(new Node(GROUP, Node.GroupType.CODE));
+			break;
+		case CBRACKET:
+			groupNode = new Tree<>(new Node(GROUP, Node.GroupType.ARRAY));
+			break;
+		default:
+			Errors.inst.printError(EK_PARSE_UNCLOSE, tk.type.toString());
+			return false;
+		}
+
+		/* Build the opener this closer must match (same nesting id). */
+		Token matching = null;
+
+		if (tk.type == CBRACKET) {
+			matching = new Token(Token.Type.OBRACKET, tk.intValue);
+		} else if (tk.type == CBRACE) {
+			matching = new Token(Token.Type.OBRACE, tk.intValue);
+		}
+
+		final ITree<Node> matchNode = new Tree<>(new Node(OGROUP, matching));
+
+		if (!working.contains(matchNode)) {
+			Errors.inst.printError(EK_PARSE_UNCLOSE, tk.toString(), matchNode.toString());
+
+			System.out.println("\tCurrent forest is: ");
+
+			int treeNo = 1;
+
+			for (final ITree<Node> ast : working) {
+				System.out.println("Tree " + treeNo++ + ": " + ast.toString());
+			}
+
+			return false;
+		}
+
+		/* Pop down to the opener, preserving order of the children. */
+		final Deque<ITree<Node>> childs = new LinkedList<>();
+
+		while (!working.peek().equals(matchNode)) {
+			childs.push(working.pop());
+		}
+
+		/* Discard opener */
+		working.pop();
+
+		for (final ITree<Node> child : childs) {
+			groupNode.addChild(child);
+		}
+
+		working.push(groupNode);
+
+		return true;
+	}
+}
diff --git a/base/src/bjc/dicelang/Shunter.java b/base/src/bjc/dicelang/Shunter.java
new file mode 100644
index 0000000..9d63b41
--- /dev/null
+++ b/base/src/bjc/dicelang/Shunter.java
@@ -0,0 +1,380 @@
+package bjc.dicelang;
+
+import java.util.Deque;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.Set;
+
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.funcdata.FunctionalMap;
+import bjc.utils.funcdata.IList;
+import bjc.utils.funcdata.IMap;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_SHUNT_INVSEP;
+import static bjc.dicelang.Errors.ErrorKey.EK_SHUNT_NOGROUP;
+import static bjc.dicelang.Errors.ErrorKey.EK_SHUNT_NOTADJ;
+import static bjc.dicelang.Errors.ErrorKey.EK_SHUNT_NOTADV;
+import static bjc.dicelang.Errors.ErrorKey.EK_SHUNT_NOTASSOC;
+import static bjc.dicelang.Token.Type.*;
+
+/**
+ * Shunt a set of infix tokens to postfix tokens.
+ *
+ * @author EVE
+ *
+ */
+public class Shunter {
+ /* The binary operators and their priorities. */
+ IMap<Token.Type, Integer> ops;
+
+ /* Operators that are right-associative */
+ Set<Token.Type> rightAssoc;
+
+ /* Operators that aren't associative */
+ Set<Token.Type> notAssoc;
+
+ /*
+ * Unary operators that can only be applied to non-operator tokens and
+ * yield operator tokens.
+ */
+ Set<Token.Type> unaryAdjectives;
+
+ /*
+ * Unary operators that can only be applied to operator tokens and yield
+ * operator tokens.
+ */
+ Set<Token.Type> unaryAdverbs;
+
+ /*
+ * Unary operators that can only be applied to operator tokens and yield
+ * data tokens
+ */
+ Set<Token.Type> unaryGerunds;
+
+ /** Precedence for math operators. */
+ public final int MATH_PREC = 30;
+ /** Precedence for dice operators. */
+ public final int DICE_PREC = 20;
+ /** Precedence for string operators. */
+ public final int STR_PREC = 10;
+ /** Precedence for expression operators. */
+ public final int EXPR_PREC = 0;
+
+ /** Create a new shunter. */
+ public Shunter() {
+ /* Create op map. */
+ ops = new FunctionalMap<>();
+
+ /* Create association maps. */
+ rightAssoc = new HashSet<>();
+ notAssoc = new HashSet<>();
+
+ /* Create unary maps. */
+ unaryAdjectives = new HashSet<>();
+ unaryAdverbs = new HashSet<>();
+ unaryGerunds = new HashSet<>();
+
+ /* Set up unary adverbs. */
+ unaryAdverbs.add(COERCE);
+
+ /* Setup operators. */
+ /* Math operators. */
+ ops.put(ADD, 0 + MATH_PREC);
+ ops.put(SUBTRACT, 0 + MATH_PREC);
+
+ ops.put(MULTIPLY, 1 + MATH_PREC);
+ ops.put(IDIVIDE, 1 + MATH_PREC);
+ ops.put(DIVIDE, 1 + MATH_PREC);
+
+ /* Dice operators. */
+ ops.put(DICEGROUP, 0 + DICE_PREC);
+
+ ops.put(DICECONCAT, 1 + DICE_PREC);
+
+ ops.put(DICELIST, 2 + DICE_PREC);
+
+ /* String operators. */
+ ops.put(STRCAT, 0 + STR_PREC);
+
+ ops.put(STRREP, 1 + STR_PREC);
+
+ /* Expression operators. */
+ ops.put(LET, 0 + EXPR_PREC);
+
+ ops.put(BIND, 1 + EXPR_PREC);
+ }
+
+ /**
+ * Shunt a set of tokens from infix to postfix.
+ *
+ * @param tks
+ * The tokens to input.
+ *
+ * @param returned
+ * The postfix tokens.
+ *
+ * @return Whether or not the shunt succeeded.
+ */
+ public boolean shuntTokens(final IList<Token> tks, final IList<Token> returned) {
+ /* Operator stack for normal and unary operators. */
+ final Deque<Token> opStack = new LinkedList<>();
+ final Deque<Token> unaryOps = new LinkedList<>();
+
+ /* Currently returned lists. */
+ final Deque<Token> currReturned = new LinkedList<>();
+
+ /* Tokens to feed ahead of the current one. */
+ final Deque<Token> feed = new LinkedList<>();
+
+ for (final Token tk : tks.toIterable()) {
+ boolean succ;
+
+ /* Drain the feed queue. */
+ while (feed.size() != 0) {
+ succ = shuntToken(feed.poll(), opStack, unaryOps, currReturned, feed);
+
+ if (!succ) {
+ return false;
+ }
+ }
+
+ succ = shuntToken(tk, opStack, unaryOps, currReturned, feed);
+
+ if (!succ) {
+ return false;
+ }
+ }
+
+ /* Flush leftover operators. */
+ while (!opStack.isEmpty()) {
+ currReturned.addLast(opStack.pop());
+ }
+
+ /* Add the tokens to the returned list. */
+ for (final Token tk : currReturned) {
+ returned.add(tk);
+ }
+
+ return true;
+ }
+
+ /* Shunt a token. */
+ private boolean shuntToken(final Token tk, final Deque<Token> opStack,
+ final Deque<Token> unaryStack,
+ final Deque<Token> currReturned, final Deque<Token> feed) {
+ /* Handle unary operators. */
+ if (unaryStack.size() != 0) {
+ if (isUnary(tk)) {
+ unaryStack.add(tk);
+ return true;
+ }
+
+ final Token unaryOp = unaryStack.pop();
+
+ final Token.Type unaryType = unaryOp.type;
+
+ if (unaryAdjectives.contains(unaryType)) {
+ /*
+ * Handle unary adjectives that take a
+ * non-operator.
+ */
+ if (isOp(tk)) {
+ Errors.inst.printError(EK_SHUNT_NOTADV, unaryOp.toString(), tk.toString());
+ return false;
+ }
+
+ final Token newTok = new Token(TAGOPR);
+
+ if (tk.type == TAGOP) {
+ newTok.tokenValues = tk.tokenValues;
+ } else {
+ newTok.tokenValues = new FunctionalList<>(tk);
+ }
+
+ newTok.tokenValues.add(unaryOp);
+ opStack.push(newTok);
+
+ return true;
+ } else if (unaryAdverbs.contains(unaryType)) {
+ /* Handle unary adverbs that take an operator. */
+ if (!isOp(tk)) {
+ Errors.inst.printError(EK_SHUNT_NOTADJ, unaryOp.toString(), tk.toString());
+ return false;
+ }
+
+ final Token newTok = new Token(TAGOPR);
+
+ if (tk.type == TAGOP) {
+ newTok.tokenValues = tk.tokenValues;
+ } else {
+ newTok.tokenValues = new FunctionalList<>(tk);
+ }
+
+ newTok.tokenValues.add(unaryOp);
+ opStack.push(newTok);
+
+ return true;
+ }
+ }
+
+ if (isUnary(tk)) {
+ unaryStack.add(tk);
+ return true;
+ } else if (isOp(tk)) {
+ /* Drain higher precedence operators. */
+ while (!opStack.isEmpty() && isHigherPrec(tk, opStack.peek())) {
+ final Token newOp = opStack.pop();
+
+ if (tk.type == newOp.type && notAssoc.contains(tk.type)) {
+ Errors.inst.printError(EK_SHUNT_NOTASSOC, tk.type.toString());
+ }
+
+ currReturned.addLast(newOp);
+ }
+
+ opStack.push(tk);
+ } else if (tk.type == OPAREN || tk.type == OBRACE) {
+ opStack.push(tk);
+
+ if (tk.type == OBRACE) {
+ currReturned.addLast(tk);
+ }
+ } else if (tk.type == CPAREN || tk.type == CBRACE) {
+ /* Handle closing delimiter. */
+ Token matching = null;
+
+ switch (tk.type) {
+ case CPAREN:
+ matching = new Token(OPAREN, tk.intValue);
+ break;
+ case CBRACE:
+ matching = new Token(OBRACE, tk.intValue);
+ break;
+ default:
+ Errors.inst.printError(EK_SHUNT_NOGROUP);
+ return false;
+ }
+
+ if (!opStack.contains(matching)) {
+ Errors.inst.printError(EK_SHUNT_NOGROUP, tk.toString(), matching.toString());
+ return false;
+ }
+
+ while (!opStack.peek().equals(matching)) {
+ currReturned.addLast(opStack.pop());
+ }
+
+ if (tk.type == CBRACE) {
+ currReturned.addLast(tk);
+ }
+
+ opStack.pop();
+ } else if (tk.type == GROUPSEP) {
+ /* Add a grouped token. */
+ final IList<Token> group = new FunctionalList<>();
+
+ while (currReturned.size() != 0 && !currReturned.peek().isGrouper()) {
+ group.add(currReturned.pop());
+ }
+
+ while (opStack.size() != 0 && !opStack.peek().isGrouper()) {
+ group.add(opStack.pop());
+ }
+
+ if (currReturned.size() == 0) {
+ Errors.inst.printError(EK_SHUNT_INVSEP);
+ return false;
+ }
+
+ currReturned.addLast(new Token(TOKGROUP, group));
+ } else {
+ currReturned.addLast(tk);
+ }
+
+ return true;
+ }
+
+ /* Check if an operator has higher precedence. */
+ private boolean isHigherPrec(final Token lft, final Token rght) {
+ final Token.Type left = lft.type;
+ final Token.Type right = rght.type;
+
+ boolean exists = ops.containsKey(right);
+
+ if (rght.type == TAGOPR) {
+ exists = true;
+ }
+
+ /* If it doesn't, the left is higher precedence. */
+ if (!exists) {
+ return false;
+ }
+
+ int rightPrecedence;
+ int leftPrecedence;
+
+ if (rght.type == TAGOPR) {
+ rightPrecedence = (int) rght.intValue;
+ } else {
+ rightPrecedence = ops.get(right);
+ }
+
+ if (lft.type == TAGOPR) {
+ leftPrecedence = (int) lft.intValue;
+ } else {
+ leftPrecedence = ops.get(left);
+ }
+
+ if (rightAssoc.contains(left)) {
+ return rightPrecedence > leftPrecedence;
+ }
+
+ return rightPrecedence >= leftPrecedence;
+ }
+
+ /* Check if something is an operator. */
+ private boolean isOp(final Token tk) {
+ final Token.Type ty = tk.type;
+
+ if (ops.containsKey(ty)) {
+ return true;
+ }
+
+ if (unaryAdjectives.contains(ty)) {
+ return true;
+ }
+
+ if (unaryAdverbs.contains(ty)) {
+ return true;
+ }
+
+ if (unaryGerunds.contains(ty)) {
+ return true;
+ }
+
+ if (ty == TAGOPR) {
+ return true;
+ }
+
+ return false;
+ }
+
+ /* Check if something is a unary operator. */
+ private boolean isUnary(final Token tk) {
+ final Token.Type ty = tk.type;
+
+ if (unaryAdjectives.contains(ty)) {
+ return true;
+ }
+
+ if (unaryAdverbs.contains(ty)) {
+ return true;
+ }
+
+ if (unaryGerunds.contains(ty)) {
+ return true;
+ }
+
+ return false;
+ }
+}
diff --git a/base/src/bjc/dicelang/Token.java b/base/src/bjc/dicelang/Token.java
new file mode 100644
index 0000000..2841973
--- /dev/null
+++ b/base/src/bjc/dicelang/Token.java
@@ -0,0 +1,149 @@
+package bjc.dicelang;
+
+import bjc.dicelang.dice.DieExpression;
+import bjc.utils.funcdata.IList;
+
+/*
+ * @TODO 10/09/17 Ben Culkin :TokenReorg
+ * Split the class into subclasses based off of type.
+ */
+/**
+ * Lexer token.
+ */
+@SuppressWarnings("javadoc")
+public class Token {
+ public final static Token NIL_TOKEN = new Token(Type.NIL);
+
+ /**
+ * Possible token types
+ */
+ public static enum Type {
+ // Natural tokens
+ // These are produced from lexemes
+ ADD, SUBTRACT, MULTIPLY, DIVIDE, IDIVIDE, INT_LIT, FLOAT_LIT, STRING_LIT, VREF, DICE_LIT, DICESCALAR, DICEFUDGE, DICEGROUP, DICECONCAT, DICELIST, LET, BIND, COERCE, STRCAT, STRREP, OPAREN, CPAREN, OBRACKET, CBRACKET, OBRACE, CBRACE,
+
+ // Synthetic tokens
+ // These are produced when needed
+ NIL, GROUPSEP, TOKGROUP, TAGOP, TAGOPR
+ }
+
+ public final Type type;
+
+ // This is used for the following token types
+ // INT_LIT (int value)
+ // STRING_LIT (index into string table)
+ // VREF (index into sym table)
+ // O* and C* (sym-count of current token)
+ public long intValue;
+
+ // This is used for the following token types
+ // FLOAT_LIT (float value)
+ public double floatValue;
+
+ // This is used for the following token types
+ // DICE_LIT (dice value)
+ public DieExpression diceValue;
+
+ // This is used for the following token types
+ // TOKGROUP (the tokens in the group)
+ // TAG* (the tagged construct)
+ public IList<Token> tokenValues;
+
+ public Token(final Type typ) {
+ type = typ;
+ }
+
+ public Token(final Type typ, final long val) {
+ this(typ);
+
+ intValue = val;
+ }
+
+ public Token(final Type typ, final double val) {
+ this(typ);
+
+ floatValue = val;
+ }
+
+ public Token(final Type typ, final DieExpression val) {
+ this(typ);
+
+ diceValue = val;
+ }
+
+ public Token(final Type typ, final IList<Token> tkVals) {
+ this(typ);
+
+ tokenValues = tkVals;
+ }
+
+ @Override
+ public String toString() {
+ switch (type) {
+ case INT_LIT:
+ case STRING_LIT:
+ case VREF:
+ case OPAREN:
+ case CPAREN:
+ case OBRACKET:
+ case CBRACKET:
+ case OBRACE:
+ case CBRACE:
+ return type.toString() + "(" + intValue + ")";
+
+ case FLOAT_LIT:
+ return type.toString() + "(" + floatValue + ")";
+
+ case DICE_LIT:
+ return type.toString() + "(" + diceValue + ")";
+
+ case TAGOP:
+ case TAGOPR:
+ case TOKGROUP:
+ return type.toString() + "(" + tokenValues + ")";
+
+ default:
+ return type.toString();
+ }
+ }
+
+ @Override
+ public boolean equals(final Object other) {
+ if (!(other instanceof Token)) {
+ return false;
+ }
+
+ final Token otk = (Token) other;
+
+ if (otk.type != type) {
+ return false;
+ }
+
+ switch (type) {
+ case OBRACE:
+ case OBRACKET:
+ return intValue == otk.intValue;
+
+ default:
+ return true;
+ }
+ }
+
+ public boolean isGrouper() {
+ switch (type) {
+ case OPAREN:
+ case OBRACE:
+ case OBRACKET:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ // TODO Auto-generated method stub
+ return super.hashCode();
+ }
+}
diff --git a/base/src/bjc/dicelang/Tokenizer.java b/base/src/bjc/dicelang/Tokenizer.java
new file mode 100644
index 0000000..3e4a490
--- /dev/null
+++ b/base/src/bjc/dicelang/Tokenizer.java
@@ -0,0 +1,174 @@
+package bjc.dicelang;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_TOK_INVBASE;
+import static bjc.dicelang.Errors.ErrorKey.EK_TOK_INVFLEX;
+import static bjc.dicelang.Errors.ErrorKey.EK_TOK_UNGROUP;
+import static bjc.dicelang.Token.Type.*;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import bjc.dicelang.dice.DiceBox;
+import bjc.utils.funcdata.FunctionalMap;
+import bjc.utils.funcdata.IMap;
+import bjc.utils.funcutils.StringUtils;
+import bjc.utils.parserutils.TokenUtils;
+
+/**
+ * Converts strings into tokens.
+ */
+public class Tokenizer {
+ /* Literal tokens for tokenization */
+ private final IMap<String, Token.Type> litTokens;
+
+ private final DiceLangEngine eng;
+
+ private int nextSym = 0;
+
+ public Tokenizer(final DiceLangEngine engine) {
+ eng = engine;
+
+ litTokens = new FunctionalMap<>();
+
+ litTokens.put("+", ADD);
+ litTokens.put("-", SUBTRACT);
+ litTokens.put("*", MULTIPLY);
+ litTokens.put("/", DIVIDE);
+ litTokens.put("//", IDIVIDE);
+ litTokens.put("sd", DICESCALAR);
+ litTokens.put("df", DICEFUDGE);
+ litTokens.put("dg", DICEGROUP);
+ litTokens.put("dc", DICECONCAT);
+ litTokens.put("dl", DICELIST);
+ litTokens.put("=>", LET);
+ litTokens.put(":=", BIND);
+ litTokens.put(".+.", STRCAT);
+ litTokens.put(".*.", STRREP);
+ litTokens.put(",", GROUPSEP);
+ litTokens.put("crc", COERCE);
+ }
+
+ public Token lexToken(final String token, final IMap<String, String> stringLts) {
+ if (token.equals("")) {
+ return null;
+ }
+
+ Token tk = Token.NIL_TOKEN;
+
+ if (litTokens.containsKey(token)) {
+ tk = new Token(litTokens.get(token));
+ } else {
+ switch (token.charAt(0)) {
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ tk = tokenizeGrouping(token);
+ break;
+ default:
+ tk = tokenizeLiteral(token, stringLts);
+ }
+ }
+
+ return tk;
+ }
+
+ private static Token tokenizeGrouping(final String token) {
+ Token tk = Token.NIL_TOKEN;
+
+ if (StringUtils.containsOnly(token, "\\" + token.charAt(0))) {
+ /* Handle multiple-grouped delimiters. */
+ switch (token.charAt(0)) {
+ case '(':
+ tk = new Token(OPAREN, token.length());
+ break;
+
+ case ')':
+ tk = new Token(CPAREN, token.length());
+ break;
+
+ case '[':
+ tk = new Token(OBRACKET, token.length());
+ break;
+
+ case ']':
+ tk = new Token(CBRACKET, token.length());
+ break;
+
+ case '{':
+ tk = new Token(OBRACE, token.length());
+ break;
+
+ case '}':
+ tk = new Token(CBRACE, token.length());
+ break;
+
+ default:
+ Errors.inst.printError(EK_TOK_UNGROUP, token);
+ break;
+ }
+ }
+
+ return tk;
+ }
+
+ /* Patterns for matching. */
+ private final Pattern hexadecimalMatcher =
+ Pattern.compile("\\A[\\-\\+]?0x[0-9A-Fa-f]+\\Z");
+ private final Pattern flexadecimalMatcher =
+ Pattern.compile("\\A[\\-\\+]?[0-9][0-9A-Za-z]+B\\d{1,2}\\Z");
+ private final Pattern stringLitMatcher =
+ Pattern.compile("\\AstringLiteral(\\d+)\\Z");
+
+ /* Tokenize a literal value. */
+ private Token tokenizeLiteral(final String rtoken, final IMap<String, String> stringLts) {
+ Token tk = Token.NIL_TOKEN;
+
+ String token = rtoken.trim();
+
+ if (TokenUtils.isInt(token)) {
+ tk = new Token(INT_LIT, Long.parseLong(token));
+ } else if (hexadecimalMatcher.matcher(token).matches()) {
+ final String newToken = token.substring(0, 1) + token.substring(token.indexOf('x'));
+ tk = new Token(INT_LIT, Long.parseLong(newToken.substring(2).toUpperCase(), 16));
+ } else if (flexadecimalMatcher.matcher(token).matches()) {
+ final int parseBase = Integer.parseInt(token.substring(token.lastIndexOf('B') + 1));
+
+ if (parseBase < Character.MIN_RADIX || parseBase > Character.MAX_RADIX) {
+ Errors.inst.printError(EK_TOK_INVBASE, Integer.toString(parseBase));
+ return Token.NIL_TOKEN;
+ }
+
+ final String flexNum = token.substring(0, token.lastIndexOf('B'));
+
+ try {
+ tk = new Token(INT_LIT, Long.parseLong(flexNum, parseBase));
+ } catch (final NumberFormatException nfex) {
+ Errors.inst.printError(EK_TOK_INVFLEX, flexNum, Integer.toString(parseBase));
+ return Token.NIL_TOKEN;
+ }
+ } else if (TokenUtils.isDouble(token)) {
+ tk = new Token(FLOAT_LIT, Double.parseDouble(token));
+ } else if (DiceBox.isValidExpression(token)) {
+ tk = new Token(DICE_LIT, DiceBox.parseExpression(token));
+ } else {
+ final Matcher stringLit = stringLitMatcher.matcher(token);
+
+ if (stringLit.matches()) {
+ final int litNum = Integer.parseInt(stringLit.group(1));
+
+ eng.addStringLiteral(litNum, stringLts.get(token));
+ tk = new Token(STRING_LIT, litNum);
+ } else {
+ /* Everything else is a symbol */
+ eng.symTable.put(nextSym++, token);
+
+ tk = new Token(VREF, nextSym - 1);
+ }
+ }
+
+ return tk;
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/CompoundDie.java b/base/src/bjc/dicelang/dice/CompoundDie.java
new file mode 100644
index 0000000..0793872
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/CompoundDie.java
@@ -0,0 +1,60 @@
+package bjc.dicelang.dice;
+
+/**
+ * A die whose rolls result from concatenating two other rolls together.
+ *
+ * @author Ben Culkin
+ */
+public class CompoundDie implements Die {
+ /* The dice that form this die */
+ private final Die left;
+ private final Die right;
+
+ /**
+ * Create a new compound die.
+ *
+ * @param lft
+ * The left die
+ * @param rght
+ * The right die
+ */
+ public CompoundDie(final Die lft, final Die rght) {
+ left = lft;
+ right = rght;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ return left.canOptimize() && right.canOptimize();
+ }
+
+ @Override
+ public long optimize() {
+ long leftOpt = left.optimize();
+ long rightOpt = right.optimize();
+
+ return Long.parseLong(String.format("%d%d", leftOpt, rightOpt));
+ }
+
+ @Override
+ public long roll() {
+ long leftRoll = left.optimize();
+ long rightRoll = right.optimize();
+
+ return Long.parseLong(String.format("%d%d", leftRoll, rightRoll));
+ }
+
+ @Override
+ public long rollSingle() {
+ /* Actually one dice built using two, can't be split. */
+ return roll();
+ }
+
+ @Override
+ public String toString() {
+ String leftString = left.toString();
+ String rightString = right.toString();
+
+ return String.format("%sc%s", leftString, rightString);
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/CompoundingDie.java b/base/src/bjc/dicelang/dice/CompoundingDie.java
new file mode 100644
index 0000000..023282e
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/CompoundingDie.java
@@ -0,0 +1,115 @@
+package bjc.dicelang.dice;
+
+import java.util.function.Predicate;
+
+/**
+ * Implements a compounding die.
+ *
+ * This means that the source will be rolled, and then more single rolls will be
+ * added while it meets a qualification.
+ *
+ * @author Ben Culkin
+ */
+public class CompoundingDie implements Die {
+ /* The source die to compound. */
+ private final Die source;
+
+ /* The predicate that marks when to compound. */
+ private final Predicate<Long> compoundOn;
+ /* The string version of the predicate, if one exists. */
+ private final String compoundPattern;
+
+ /**
+ * Create a new compounding die with no pattern.
+ *
+ * @param src
+ * The die to compound from
+ * @param compound
+ * The conditions to compound on
+ */
+ public CompoundingDie(final Die src, final Predicate<Long> compound) {
+ this(src, compound, null);
+ }
+
+ /**
+ * Create a new compounding die with a specified pattern.
+ *
+ * @param src
+ * The die to compound from
+ * @param compound
+ * The conditions to compound on
+ * @param patt
+ * The string pattern the condition came from, for
+ * printing
+ */
+ public CompoundingDie(final Die src, final Predicate<Long> compound, final String patt) {
+ source = src;
+
+ compoundOn = compound;
+ compoundPattern = patt;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ if (source.canOptimize()) {
+ /* We can only be optimized for a result of zero. */
+ return source.optimize() == 0;
+ }
+
+ return false;
+ }
+
+ @Override
+ public long optimize() {
+ /* If we can be optimized, its to zero. */
+ return 0;
+ }
+
+ @Override
+ public long roll() {
+ /* The current result. */
+ long res = source.roll();
+ /* The last result. */
+ long oldRes = res;
+
+ while (compoundOn.test(oldRes)) {
+ /* Compound while the result should be compounded. */
+ oldRes = source.rollSingle();
+
+ /* Accumulate. */
+ res += oldRes;
+ }
+
+ return res;
+ }
+
+ @Override
+ public long rollSingle() {
+ /* Just compound on an initial single role. */
+ long res = source.rollSingle();
+ /* The last result. */
+ long oldRes = res;
+
+ while (compoundOn.test(oldRes)) {
+ /* Compound while the result should be compounded. */
+ oldRes = source.rollSingle();
+
+ /* Accumulate. */
+ res += oldRes;
+ }
+
+ return res;
+ }
+
+ @Override
+ public String toString() {
+ String sourceString = source.toString();
+
+ /* Can't print a parseable version. */
+ if (compoundPattern == null) {
+ return String.format("%s!!<complex-pattern>", sourceString);
+ }
+
+ return String.format("%s!!%s", sourceString, compoundPattern);
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/DiceBox.java b/base/src/bjc/dicelang/dice/DiceBox.java
new file mode 100644
index 0000000..8d00d96
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/DiceBox.java
@@ -0,0 +1,310 @@
+package bjc.dicelang.dice;
+
+import java.util.Random;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
+
+/**
+ * Contains static methods for producing dice from strings.
+ *
+ * @author Ben Culkin
+ */
+public class DiceBox {
+ static final Random rng = new Random();
+
+ /**
+ * Parse a die expression from a string.
+ *
+ * @param expString
+ * The string to parse.
+ *
+ * @return The die expression from the string, or null if it wasn't one
+ */
+ public static DieExpression parseExpression(final String expString) {
+ try {
+ return doParseExpression(expString);
+ } catch (Exception ex) {
+ /*
+ * @TODO 10/08/17 Ben Culkin :DieErrors :ErrorRefactor
+ * Use different types of exceptions to provide
+ * better error messages.
+ */
+ String exMessage = ex.getMessage();
+
+ System.out.printf("ERROR: Could not parse die expression (Cause: %s)\n", exMessage);
+ ex.printStackTrace();
+
+ return null;
+ }
+ }
+
+ private static DieExpression doParseExpression(final String expString) {
+ /* Only bother with valid expressions. */
+ if (!isValidExpression(expString)) {
+ return null;
+ }
+
+ if (scalarDiePattern.matcher(expString).matches()) {
+ /* Parse scalar die. */
+ /* @TODO 10/08/17 Ben Culkin :SubstringIndexOf
+ * This substring/index of call should be
+ * abstracted into its own method so as to make the
+ * code more explanatory and ensure that things
+ * like the return code of indexOf are correctly
+ * checked.
+ */
+ final String dieString = expString.substring(0, expString.indexOf('s'));
+
+ final long lar = Long.parseLong(dieString);
+
+ final Die scal = new ScalarDie(lar);
+
+ return new DieExpression(scal);
+ } else if (simpleDiePattern.matcher(expString).matches()) {
+ /* Parse simple die groups. */
+ final String[] dieParts = expString.split("d");
+
+ final long right = Long.parseLong(dieParts[1]);
+ final long left;
+
+ if (dieParts[0].equals("")) {
+ /* Handle short-form expressions. */
+ left = 1;
+ } else {
+ left = Long.parseLong(dieParts[0]);
+ }
+
+ final Die scal = new SimpleDie(left, right);
+
+ return new DieExpression(scal);
+ } else if (fudgeDiePattern.matcher(expString).matches()) {
+ /* Parse fudge dice. */
+ /* :SubstringIndexOf */
+ final String nDice = expString.substring(0, expString.indexOf('d'));
+ final Die fudge = new FudgeDie(Long.parseLong(nDice));
+
+ return new DieExpression(fudge);
+ } else if (compoundDiePattern.matcher(expString).matches()) {
+ /* Parse compound die expressions. */
+ final String[] dieParts = expString.split("c");
+
+ /* @TODO 10/08/17 :SplitParse
+ * Should this split string/parse split parts be
+ * abstracted into something else that handles
+ * doing the splitting correctly, as well as
+ * making sure that the resulting DieExpressions
+ * are of the right type?
+ */
+ final DieExpression left = parseExpression(dieParts[0]);
+ final DieExpression right = parseExpression(dieParts[1]);
+
+ /* :ErrorRefactor */
+ if (left.isList) {
+ System.out.printf("ERROR: Expected a scalar die expression for lhs of compound die, got a list expression instead (%s)\n",
+ left);
+ } else if (right.isList) {
+ System.out.printf("ERROR: Expected a scalar die expression for rhs of compound die, got a list expression instead (%s)\n",
+ right);
+ }
+
+ final Die compound = new CompoundDie(left.scalar, right.scalar);
+
+ return new DieExpression(new CompoundDie(left.scalar, right.scalar));
+ } else if (compoundingDiePattern.matcher(expString).matches()) {
+ /* Parse compounding die expressions. */
+ final String[] dieParts = expString.split("!!");
+
+ final DieExpression left = parseExpression(dieParts[0]);
+ final Predicate<Long> right = deriveCond(dieParts[1]);
+
+ final Die scal = new CompoundingDie(left.scalar, right, dieParts[1]);
+
+ return new DieExpression(scal);
+ } else if (explodingDiePattern.matcher(expString).matches()) {
+ /* Parse exploding die expressions. */
+ final String[] dieParts = expString.split("!");
+
+ final DieExpression left = parseExpression(dieParts[0]);
+ final Predicate<Long> right = deriveCond(dieParts[1]);
+
+ final DieList lst = new ExplodingDice(left.scalar, right, dieParts[1], false);
+
+ return new DieExpression(lst);
+ } else if (penetratingDiePattern.matcher(expString).matches()) {
+ /* Parse penetrating die expressions. */
+ final String[] dieParts = expString.split("p!");
+
+ final DieExpression left = parseExpression(dieParts[0]);
+ final Predicate<Long> right = deriveCond(dieParts[1]);
+
+ final DieList lst = new ExplodingDice(left.scalar, right, dieParts[1], true);
+
+ return new DieExpression(lst);
+ } else if (diceListPattern.matcher(expString).matches()) {
+ /* Parse simple die lists. */
+ final String[] dieParts = expString.split("dl");
+
+ final DieExpression left = parseExpression(dieParts[0]);
+ final DieExpression right = parseExpression(dieParts[1]);
+
+ final DieList lst = new SimpleDieList(left.scalar, right.scalar);
+ return new DieExpression(lst);
+ }
+
+ /* Unhandled type of die expression. */
+ System.out.printf("INTERNAL ERROR: Valid die expression '%s' not parsed\n", expString);
+ return null;
+ }
+
+ /* The strings and patterns used for matching. */
+ /* @TODO 10/08/17 Ben Culkin :RegexResource
+ * These regexes and patterns should be moved to something
+ * external, probably using the SimpleProperties-based system that
+ * BJC-Utils2 uses.
+ */
+ /* Defines a comparison predicate. */
+ private static final String comparePoint = "[<>=]\\d+";
+
+ /*
+ * Defines a scalar die.
+ *
+ * This is just a number.
+ */
+ private static final String scalarDie = "[\\+\\-]?\\d+sd";
+ private static final Pattern scalarDiePattern = Pattern.compile(
+ String.format("\\A%s\\Z", scalarDie));
+
+ /*
+ * Defines a simple die.
+ *
+ * This is a group of one or more dice of the same size.
+ */
+ private static final String simpleDie = "(?:\\d+)?d\\d+";
+ private static final Pattern simpleDiePattern = Pattern.compile("\\A" +
+ simpleDie + "\\Z");
+
+ /*
+ * Defines a fudge die.
+ *
+ * This is like a simple die, but all the die give -1, 0, or 1 as
+ * results.
+ */
+ private static final String fudgeDie = "(?:\\d+)?dF";
+ private static final Pattern fudgeDiePattern = Pattern.compile("\\A" + fudgeDie +
+ "\\Z");
+
+ /*
+ * Defines a compound die.
+ *
+ * This is like using two d10's to simulate a d100
+ */
+ private static final String compoundDie = simpleDie + "c(?:(?:" +
+ simpleDie + ")|(?:\\d+))";
+ private static final Pattern compoundDiePattern = Pattern.compile("\\A" +
+ compoundDie + "\\Z");
+
+ /*
+ * Defines a compound group.
+ *
+ * This is used for forming die list type expressions.
+ */
+ private static final String compoundGroup = "(?:(?:" + scalarDie + ")|(?:" + simpleDie +
+ ")|(?:" + compoundDie
+ + ")|(?:" + fudgeDie + "))";
+
+ /*
+ * Defines a compounding die.
+ *
+ * This is like an exploding die, but is a single die, not a group of
+ * them.
+ */
+ private static final String compoundingDie = compoundGroup + "!!" +
+ comparePoint;
+ private static final Pattern compoundingDiePattern = Pattern.compile("\\A" +
+ compoundingDie + "\\Z");
+
+ /*
+ * Defines an exploding die.
+ *
+ * This is a die that you reroll the component of if it meets a certain
+ * condition.
+ */
+ private static final String explodingDie = compoundGroup + "!" +
+ comparePoint;
+ private static final Pattern explodingDiePattern = Pattern.compile("\\A" +
+ explodingDie + "\\Z");
+
+ /*
+ * Defines a penetrating die.
+ *
+ * This is like an exploding die, but the exploded result gets a -1
+ * penalty.
+ */
+ private static final String penetratingDie = compoundGroup + "!" +
+ comparePoint;
+ private static final Pattern penetratingDiePattern = Pattern.compile("\\A" +
+ penetratingDie + "\\Z");
+
+ /*
+ * Defines a die list.
+ *
+ * This is an array of dice of the specified size.
+ */
+ private static final String diceList = compoundGroup + "dl" + compoundGroup;
+ private static final Pattern diceListPattern = Pattern.compile("\\A" + diceList +
+ "\\Z");
+
+ /**
+ * Check if a given string is a valid die expression.
+ *
+ * @param exp
+ * The string to check validity of.
+ *
+ * @return Whether or not the string is a valid command.
+ */
+ public static boolean isValidExpression(final String exp) {
+ /* @NOTE
+ * Should this matcher/matches expression be abstracted in
+ * some way?
+ */
+ if (scalarDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (simpleDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (fudgeDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (compoundDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (compoundingDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (explodingDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (penetratingDiePattern.matcher(exp).matches()) {
+ return true;
+ } else if (diceListPattern.matcher(exp).matches()) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /* Derive a predicate from a compare point */
+ private static Predicate<Long> deriveCond(final String patt) {
+ final long num = Long.parseLong(patt.substring(1));
+
+ /* @NOTE
+ * Should this be extended in some way, to provide other
+ * operators?
+ */
+ switch (patt.charAt(0)) {
+ case '<':
+ return (roll) -> (roll < num);
+ case '=':
+ return (roll) -> (roll == num);
+ case '>':
+ return (roll) -> (roll > num);
+ default:
+ return (roll) -> (false);
+ }
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/Die.java b/base/src/bjc/dicelang/dice/Die.java
new file mode 100644
index 0000000..630e8b9
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/Die.java
@@ -0,0 +1,39 @@
+package bjc.dicelang.dice;
+
/**
 * Represents one or more dice that produce a scalar result.
 *
 * @author Ben Culkin
 */
public interface Die {
	/**
	 * Can this die be optimized to a single number?
	 *
	 * A die is optimizable when every roll is guaranteed to produce the
	 * same value.
	 *
	 * @return Whether this die can be optimized or not.
	 */
	boolean canOptimize();

	/**
	 * Optimize this die to a single number.
	 *
	 * Calling optimize on dice that return false for canOptimize produces
	 * undefined behavior.
	 *
	 * @return The optimized form of this die
	 */
	long optimize();

	/**
	 * Roll this die.
	 *
	 * @return A possible roll of this die
	 */
	long roll();

	/**
	 * Roll only a single portion of this die.
	 *
	 * Presumably, for a group (e.g. 3d6) this rolls one component die
	 * rather than the whole group — confirm against implementations.
	 *
	 * @return A possible roll of a single portion of this die.
	 */
	long rollSingle();
}
diff --git a/base/src/bjc/dicelang/dice/DieExpression.java b/base/src/bjc/dicelang/dice/DieExpression.java
new file mode 100644
index 0000000..b8faeda
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/DieExpression.java
@@ -0,0 +1,69 @@
+package bjc.dicelang.dice;
+
+import java.util.Arrays;
+
+/*
+ * @NOTE
+ * I'm not a particularly large fan of sticking everything on this class
+ * and just documenting which fields are tied together in a non-obvious
+ * way. I think a class hierarchy might be better, but I am unsure of the
+ * details.
+ */
+/**
+ * Represents either a die or a die list.
+ *
+ * @author Ben Culkin
+ */
+public class DieExpression {
+ /** Is this expression a list? */
+ public final boolean isList;
+
+ /** The scalar value in this expression, if there is one. */
+ public Die scalar;
+ /** The list value in this expression, if there is one. */
+ public DieList list;
+
+ /**
+ * Create a scalar die expression.
+ *
+ * @param scal
+ * The scalar value of this expression.
+ */
+ public DieExpression(final Die scal) {
+ isList = false;
+ scalar = scal;
+ }
+
+ /**
+ * Create a list die expression.
+ *
+ * @param lst
+ * The list value of this expression.
+ */
+ public DieExpression(final DieList lst) {
+ isList = true;
+ list = lst;
+ }
+
+ @Override
+ public String toString() {
+ if (isList) {
+ return list.toString();
+ }
+
+ return scalar.toString();
+ }
+
+ /**
+ * Get the value of this expression as a string.
+ *
+ * @return The value of the expression as a string.
+ */
+ public String value() {
+ if (isList) {
+ return Arrays.toString(list.roll());
+ }
+
+ return Long.toString(scalar.roll());
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/DieList.java b/base/src/bjc/dicelang/dice/DieList.java
new file mode 100644
index 0000000..48006d4
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/DieList.java
@@ -0,0 +1,31 @@
+package bjc.dicelang.dice;
+
/**
 * Represents a group of dice.
 *
 * @author Ben Culkin.
 */
public interface DieList {
	/**
	 * Can this list be optimized?
	 *
	 * @return Whether or not this list can be optimized.
	 */
	boolean canOptimize();

	/**
	 * Optimize this list, if it can be done.
	 *
	 * Invoking this on an unoptimizable list is undefined.
	 *
	 * @return The optimized form of this list.
	 */
	long[] optimize();

	/**
	 * Roll this group of dice.
	 *
	 * @return A possible roll of this group.
	 */
	long[] roll();
}
diff --git a/base/src/bjc/dicelang/dice/ExplodingDice.java b/base/src/bjc/dicelang/dice/ExplodingDice.java
new file mode 100644
index 0000000..e891a1c
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/ExplodingDice.java
@@ -0,0 +1,124 @@
+package bjc.dicelang.dice;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.function.Predicate;
+
+/**
+ * An exploding die.
+ *
+ * Represents a die list that keeps getting another added die as long as a
+ * condition is met.
+ *
+ * @author Ben Culkin
+ */
+public class ExplodingDice implements DieList {
+ /* The source die to use. */
+ private final Die source;
+
+ /* The conditions for exploding. */
+ private final Predicate<Long> explodeOn;
+ private final String explodePattern;
+ /* Whether or not to apply a -1 penalty to explosions. */
+ private final boolean explodePenetrates;
+
+ /**
+ * Create a new exploding die.
+ *
+ * @param src
+ * The source die for exploding.
+ * @param explode
+ * The condition to explode on.
+ */
+ public ExplodingDice(final Die src, final Predicate<Long> explode) {
+ this(src, explode, null, false);
+ }
+
+ /**
+ * Create a new exploding die that may penetrate.
+ *
+ * @param src
+ * The source die for exploding.
+ * @param explode
+ * The condition to explode on.
+ * @param penetrate
+ * Whether or not for explosions to penetrate (-1 to
+ * exploded die).
+ */
+ public ExplodingDice(final Die src, final Predicate<Long> explode,
+ final boolean penetrate) {
+ this(src, explode, null, penetrate);
+ }
+
+ /**
+ * Create a new exploding die that may penetrate.
+ *
+ * @param src
+ * The source die for exploding.
+ * @param explode
+ * The condition to explode on.
+ * @param penetrate
+ * Whether or not for explosions to penetrate (-1 to
+ * exploded die).
+ * @param patt
+ * The string the condition came from, for printing.
+ */
+ public ExplodingDice(final Die src, final Predicate<Long> explode, final String patt,
+ final boolean penetrate) {
+ source = src;
+ explodeOn = explode;
+ explodePattern = patt;
+ explodePenetrates = penetrate;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ return false;
+ }
+
+ @Override
+ public long[] optimize() {
+ return new long[0];
+ }
+
+ @Override
+ public long[] roll() {
+ final long res = source.roll();
+ long oldRes = res;
+
+ final List<Long> resList = new LinkedList<>();
+ resList.add(res);
+
+ while (explodeOn.test(oldRes)) {
+ oldRes = source.rollSingle();
+
+ if (explodePenetrates) {
+ oldRes -= 1;
+ }
+
+ resList.add(oldRes);
+ }
+
+ final long resArr[] = new long[resList.size()];
+
+ int i = 0;
+ for(long rll : resList) {
+ resArr[i] = rll;
+ i += 1;
+ }
+
+ return resArr;
+ }
+
+ @Override
+ public String toString() {
+ String penString = explodePenetrates ? "p" : "";
+ String sourceString = source.toString();
+
+ if (explodePattern == null) {
+ return String.format("%s%s!<complex-pred>", sourceString, penString);
+ }
+
+ return String.format("%s%s!%s", sourceString, penString, explodePattern);
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/FudgeDie.java b/base/src/bjc/dicelang/dice/FudgeDie.java
new file mode 100644
index 0000000..23951b2
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/FudgeDie.java
@@ -0,0 +1,67 @@
+package bjc.dicelang.dice;
+
+/**
+ * A fudge die, one that has -1, 0 and 1 as its sides.
+ *
+ * @author EVE
+ *
+ */
+public class FudgeDie implements Die {
+ /* The number of dice to roll. */
+ private final Die numDice;
+
+ /**
+ * Create a new fudge die.
+ *
+ * @param nDice
+ * The number of dice to roll.
+ */
+ public FudgeDie(final long nDice) {
+ numDice = new ScalarDie(nDice);
+ }
+
+ /**
+ * Create a new fudge die.
+ *
+ * @param nDice
+ * The number of dice to roll.
+ */
+ public FudgeDie(final Die nDice) {
+ numDice = nDice;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ return numDice.canOptimize() && numDice.optimize() == 0;
+ }
+
+ @Override
+ public long optimize() {
+ return 0;
+ }
+
+ @Override
+ public long roll() {
+ long res = 0;
+
+ final long nDice = numDice.roll();
+
+ for (int i = 0; i < nDice; i++) {
+ res += rollSingle();
+ }
+
+ return res;
+ }
+
+ @Override
+ public long rollSingle() {
+ return DiceBox.rng.nextInt(3) - 1;
+ }
+
+ @Override
+ public String toString() {
+ String dieString = numDice.toString();
+
+ return String.format("%sdF", dieString);
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/MathDie.java b/base/src/bjc/dicelang/dice/MathDie.java
new file mode 100644
index 0000000..e4f2953
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/MathDie.java
@@ -0,0 +1,121 @@
+package bjc.dicelang.dice;
+
+/**
+ * A die that represents two dice with an applied math operator.
+ *
+ * @author EVE
+ *
+ */
+public class MathDie implements Die {
+ /*
+ * @TODO 10/08/17 Ben Culkin :MathGeneralize
+ * Why do we have the operator types hardcoded, instead of just
+ * having a general thing for applying a binary operator to dice?
+ * Fix this by changing it to the more general form.
+ */
+ /**
+ * The types of a math operator.
+ *
+ * @author EVE
+ *
+ */
+ public static enum MathOp {
+ /** Add two dice. */
+ ADD,
+ /** Subtract two dice. */
+ SUBTRACT,
+ /** Multiply two dice. */
+ MULTIPLY;
+
+ @Override
+ public String toString() {
+ switch (this) {
+ case ADD:
+ return "+";
+
+ case SUBTRACT:
+ return "-";
+
+ case MULTIPLY:
+ return "*";
+
+ default:
+ return this.name();
+ }
+ }
+ }
+
+ private final MathDie.MathOp type;
+
+ private final Die left;
+ private final Die right;
+
+ /**
+ * Create a new math die.
+ *
+ * @param op
+ * The operator to apply.
+ *
+ * @param lft
+ * The left operand.
+ *
+ * @param rght
+ * The right operand.
+ */
+ public MathDie(final MathDie.MathOp op, final Die lft, final Die rght) {
+ type = op;
+
+ left = lft;
+ right = rght;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ return left.canOptimize() && right.canOptimize();
+ }
+
+ private long performOp(final long lft, final long rght) {
+ switch (type) {
+ case ADD:
+ return lft + rght;
+
+ case SUBTRACT:
+ return lft - rght;
+
+ case MULTIPLY:
+ return lft * rght;
+
+ default:
+ return 0;
+ }
+ }
+
+ @Override
+ public long optimize() {
+ final long lft = left.optimize();
+ final long rght = right.optimize();
+
+ return performOp(lft, rght);
+ }
+
+ @Override
+ public long roll() {
+ final long lft = left.roll();
+ final long rght = right.roll();
+
+ return performOp(lft, rght);
+ }
+
+ @Override
+ public long rollSingle() {
+ final long lft = left.rollSingle();
+ final long rght = right.rollSingle();
+
+ return performOp(lft, rght);
+ }
+
+ @Override
+ public String toString() {
+ return left.toString() + " " + type.toString() + " " + right.toString();
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/ScalarDie.java b/base/src/bjc/dicelang/dice/ScalarDie.java
new file mode 100644
index 0000000..cbe9d3f
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/ScalarDie.java
@@ -0,0 +1,47 @@
+package bjc.dicelang.dice;
+
+/**
+ * A scalar die, that always returns a given number.
+ *
+ * @author EVE
+ *
+ */
+public class ScalarDie implements Die {
+ /* The die value. */
+ private final long val;
+
+ /**
+ * Create a new scalar die with a set value.
+ *
+ * @param vl
+ * The value to use.
+ */
+ public ScalarDie(final long vl) {
+ val = vl;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ return true;
+ }
+
+ @Override
+ public long optimize() {
+ return val;
+ }
+
+ @Override
+ public long roll() {
+ return val;
+ }
+
+ @Override
+ public long rollSingle() {
+ return val;
+ }
+
+ @Override
+ public String toString() {
+ return String.format("%d", val);
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/SimpleDie.java b/base/src/bjc/dicelang/dice/SimpleDie.java
new file mode 100644
index 0000000..6cae423
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/SimpleDie.java
@@ -0,0 +1,119 @@
+package bjc.dicelang.dice;
+
+/**
+ * A simple group of dice.
+ *
+ * @author EVE
+ *
+ */
+public class SimpleDie implements Die {
+ /* The number of dice to roll. */
+ private final Die numDice;
+ /*
+ * The size of each dice to roll.
+ *
+ * Rolled once per role, not once for each dice rolled.
+ *
+ * @NOTE
+ * Would having some way to roll it once for each dice rolled be
+ * useful in any sort of case?
+ */
+ private final Die diceSize;
+
+ /**
+ * Create a new dice group.
+ *
+ * @param nDice
+ * The number of dice.
+ *
+ * @param size
+ * The size of the dice.
+ */
+ public SimpleDie(final long nDice, final long size) {
+ this(new ScalarDie(nDice), new ScalarDie(size));
+ }
+
+ /**
+ * Create a new dice group.
+ *
+ * @param nDice
+ * The number of dice.
+ *
+ * @param size
+ * The size of the dice.
+ */
+ public SimpleDie(final Die nDice, final long size) {
+ this(nDice, new ScalarDie(size));
+ }
+
+ /**
+ * Create a new dice group.
+ *
+ * @param nDice
+ * The number of dice.
+ *
+ * @param size
+ * The size of the dice.
+ */
+ public SimpleDie(final long nDice, final Die size) {
+ this(new ScalarDie(nDice), size);
+ }
+
+ /**
+ * Create a new dice group.
+ *
+ * @param nDice
+ * The number of dice.
+ *
+ * @param size
+ * The size of the dice.
+ */
+ public SimpleDie(final Die nDice, final Die size) {
+ numDice = nDice;
+ diceSize = size;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ if (diceSize.canOptimize() && diceSize.optimize() <= 1) {
+ return numDice.canOptimize();
+ }
+
+ return false;
+ }
+
+ @Override
+ public long optimize() {
+ final long optSize = diceSize.optimize();
+
+ if (optSize == 0) {
+ return 0;
+ }
+
+ return numDice.optimize();
+ }
+
+ @Override
+ public long roll() {
+ long total = 0;
+
+ final long nDice = numDice.roll();
+ final long dSize = diceSize.roll();
+
+ for (int i = 0; i < nDice; i++) {
+ total += Math.abs(DiceBox.rng.nextLong()) % dSize + 1;
+ }
+
+ return total;
+ }
+
+ @Override
+ public long rollSingle() {
+ return Math.abs(DiceBox.rng.nextLong()) % diceSize.roll() + 1;
+ }
+
+ @Override
+ public String toString() {
+ return numDice + "d" + diceSize;
+ }
+}
diff --git a/base/src/bjc/dicelang/dice/SimpleDieList.java b/base/src/bjc/dicelang/dice/SimpleDieList.java
new file mode 100644
index 0000000..8391054
--- /dev/null
+++ b/base/src/bjc/dicelang/dice/SimpleDieList.java
@@ -0,0 +1,77 @@
+package bjc.dicelang.dice;
+
+/**
+ * A simple list of dice.
+ *
+ * @author EVE
+ *
+ * @TODO 10/08/17 Ben Culkin :DieListGeneralize
+ * DieList in general should be changed to be able to be
+ * constructed from an arbitrary die using rollSingle and things
+ * like that.
+ */
+public class SimpleDieList implements DieList {
+ /* The number of dice to roll. */
+ private final Die numDice;
+ /*
+ * The size of each die to roll.
+ *
+ * Checked once per roll, not once per dice rolled.
+ */
+ private final Die size;
+
+ /**
+ * Create a new list of dice.
+ *
+ * @param nDice
+ * The number of dice in the list.
+ *
+ * @param sze
+ * The size of dice in the list.
+ */
+ public SimpleDieList(final Die nDice, final Die sze) {
+ numDice = nDice;
+ size = sze;
+ }
+
+ @Override
+ public boolean canOptimize() {
+ if (size.canOptimize() && size.optimize() <= 1) {
+ return numDice.canOptimize();
+ }
+
+ return false;
+ }
+
+ @Override
+ public long[] optimize() {
+ final int sze = (int) numDice.optimize();
+ final long res = size.optimize();
+
+ final long[] ret = new long[sze];
+
+ for (int i = 0; i < sze; i++) {
+ ret[i] = res;
+ }
+
+ return ret;
+ }
+
+ @Override
+ public long[] roll() {
+ final int num = (int) numDice.roll();
+
+ final long[] ret = new long[num];
+
+ for (int i = 0; i < num; i++) {
+ ret[i] = size.roll();
+ }
+
+ return ret;
+ }
+
+ @Override
+ public String toString() {
+ return numDice.toString() + "dl" + size.toString();
+ }
+}
diff --git a/base/src/bjc/dicelang/expr/Ezpr.java b/base/src/bjc/dicelang/expr/Ezpr.java
new file mode 100644
index 0000000..862c097
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/Ezpr.java
@@ -0,0 +1,149 @@
+/*
+ * @TODO 10/08/17 Ben Culkin :EzprFixing
+ * Implement these, and make sure they work correctly.
+ */
+// package bjc.dicelang.expr;
+
+// import bjc.utils.data.ITree;
+
+// import com.google.guava.collect.HashMultiset;
+// import com.google.guava.collect.Multiset;
+
+// import static bjc.dicelang.expr.EzprType;
+// import static bjc.dicelang.expr.EzprType.SUM;
+// import static bjc.dicelang.expr.EzprType.MUL;
+
+// import static bjc.dicelang.expr.EzprNode;
// import static bjc.dicelang.expr.EzprNode.EzprNodeType;
// import static bjc.dicelang.expr.EzprNode.EzprNodeType.EZPR;
// import static bjc.dicelang.expr.EzprNode.EzprNodeType.TOKEN;
+
+// public class Ezpr {
+// public static enum EzprType {
+// SUM, MUL
+// }
+
+// public static class EzprNode {
+// public static enum EzprNodeType {
+// EZPR, TOKEN
+// }
+
+// public final EzprNodeType typ;
+
+// public final Ezpr ezp;
+// public final Token tok;
+
+// public EzprNode(Ezpr exp) {
+// typ = EZPR;
+
+// ezp = exp;
+// tok = null;
+// }
+
+// public EzprNode(Token tk) {
+// typ = TOKEN;
+
+// tok = tk;
+// ezp = null;
+// }
+
+// public String toString() {
+// if(typ == TOKEN) {
+// return tok.toString();
+// }
+// return ezp.toString();
+// }
+// }
+
+// private EzprType typ;
+
+// private Multiset<EzprNode> positive;
+// private Multiset<EzprNode> negative;
+
+// public Ezpr(EzprType type, Multiset<EzprNode> pos, Multiset<EzprNode> neg) {
+// typ = type;
+
+// positive = pos;
+// negative = neg;
+// }
+
+// public Ezpr flatten() {
+// HashMultiset<EzprNode> newPositive = HashMultiset.create();
+// HashMultiset<EzprNode> newNegative = HashMultiset.create();
+
+// for(EzprNode nd : positive) {
+// /* Flatten enclosed ezprs of the same type. */
+// if(nd.typ == EZPR && (nd.ezp.typ == typ)) {
+// /* Recursively flatten kids. */
+// Ezpr kid = nd.ezp.flatten();
+
+// if(typ == SUM) {
+// /* Add sum parts to corresponding bags. */
+// for(EzprNode knd : kid.positive) {
+// newPositive.add(knd);
+// }
+// for(EzprNode knd : kid.negative) {
+// newNegative.add(knd);
+// }
+// } else {
+// /* @TODO ensure that this is correct. */
+// for(EzprNode knd : kid.positive) {
+// newPositive.add(knd);
+// }
+// for(EzprNode knd : kid.negative) {
+// newNegative.add(knd);
+// }
+// }
+// } else {
+// newPositive.add(nd);
+// }
+// }
+
+// for(EzprNode nd : negative) {
+// /* Flatten enclosed ezprs of the same type. */
+// if(nd.typ == EZPR && (nd.ezp.typ == typ)) {
+// /* Recursively flatten kids. */
+// Ezpr kid = nd.ezp.flatten();
+
+// /* @TODO ensure that this is correct. */
+// if(typ == SUM) {
+// for(EzprNode knd : kid.positive) {
+// newNegative.add(knd);
+// }
+// for(EzprNode knd : kid.negative) {
+// newPositive.add(knd);
+// }
+// } else {
+// for(EzprNode knd : kid.positive) {
+// newNegative.add(knd);
+// }
+// for(EzprNode knd : kid.negative) {
+// newPositive.add(knd);
+// }
+// }
+// } else {
+// newNegative.add(nd);
+// }
+// }
+// }
+
+// public String toString() {
+// StringBuilder sb = new StringBuilder(typ.toString());
+
+// sb.append(" [ ");
+// for(EzprNode nd : positive) {
+// sb.append(nd.toString());
+// sb.append(" ");
+// }
+
+// sb.append("# ");
+// for(EzprNode nd : negative) {
+// sb.append(nd.toString());
+// sb.append(" ");
+// }
+
+// sb.append("]");
+
+// return sb.toString();
+// }
+// }
diff --git a/base/src/bjc/dicelang/expr/Lexer.java b/base/src/bjc/dicelang/expr/Lexer.java
new file mode 100644
index 0000000..dfa0f76
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/Lexer.java
@@ -0,0 +1,62 @@
+package bjc.dicelang.expr;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import bjc.utils.funcdata.IList;
+import bjc.utils.parserutils.splitter.ConfigurableTokenSplitter;
+
+/*
+ * @TODO 10/08/18 :IntExpressions
+ * Add support for integer constants, and maybe floating-point ones as well
+ * if you feel like. Heck, you could even go for ratio constants and things
+ * as well.
+ */
+/**
+ * Implements the lexer for simple expression operations.
+ *
+ * @author Ben Culkin
+ */
+public class Lexer {
+ /* Splitter we use. */
+ private final ConfigurableTokenSplitter split;
+
+ /** Create a new expression lexer. */
+ public Lexer() {
+ split = new ConfigurableTokenSplitter(true);
+
+ split.addSimpleDelimiters("(", ")");
+ split.addSimpleDelimiters("+", "-", "*", "/");
+ }
+
+ /**
+ * Convert a string from a input command to a series of infix tokens.
+ *
+ * @param inp
+ * The input command.
+ *
+ * @param tks
+ * The token state.
+ *
+ * @return A series of infix tokens representing the command.
+ */
+ public Token[] lexString(final String inp, final Tokens tks) {
+ /* Split tokens on whitespace. */
+ final String[] spacedTokens = inp.split("[ \t]");
+ /* Tokens to return. */
+ final List<Token> tokens = new LinkedList<>();
+
+ /* Process each token. */
+ for (final String spacedToken : spacedTokens) {
+ /* Split on operators. */
+ final IList<String> splitTokens = split.split(spacedToken);
+ /* Convert strings to tokens. */
+ final IList<Token> rawTokens = splitTokens.map(tok -> tks.lexToken(tok, spacedToken));
+
+ /* Add tokens to results. */
+ rawTokens.forEach(tokens::add);
+ }
+
+ return tokens.toArray(new Token[0]);
+ }
+}
diff --git a/base/src/bjc/dicelang/expr/Parser.java b/base/src/bjc/dicelang/expr/Parser.java
new file mode 100644
index 0000000..5fa2d3d
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/Parser.java
@@ -0,0 +1,147 @@
+package bjc.dicelang.expr;
+
+import java.util.Arrays;
+import java.util.Scanner;
+
+import bjc.utils.data.ITree;
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.parserutils.TreeConstructor;
+
+/**
+ * Parser for simple math expressions.
+ *
+ * @author Ben Culkin
+ */
+public class Parser {
+ /*
+ * @TODO 10/08/17 Ben Culkin :MainSeperation
+ * This main method should be moved to its own class.
+ */
+ /**
+ * Main method.
+ *
+ * @param args
+ * Unused CLI args.
+ */
+ public static void main(final String[] args) {
+ /* Create our objects. */
+ final Tokens toks = new Tokens();
+ final Lexer lex = new Lexer();
+
+ /* Prepare our input source. */
+ final Scanner scan = new Scanner(System.in);
+
+ /* Read initial command. */
+ System.out.print("Enter a math expression (blank line to quit): ");
+ String ln = scan.nextLine().trim();
+
+ /* Enter REPL loop. */
+ while (!ln.equals("")) {
+ /* Print raw command. */
+ System.out.println("Raw command: " + ln);
+ System.out.println();
+
+ /* Lex command to infix tokens. */
+ final Token[] infixTokens = lex.lexString(ln, toks);
+ System.out.println("Lexed tokens: ");
+ for (final Token tok : infixTokens) {
+ System.out.println("\t" + tok);
+ }
+
+ /* Print out infix expression. */
+ System.out.print("Lexed expression: ");
+ for (final Token tok : infixTokens) {
+ System.out.print(tok.toExpr() + " ");
+ }
+
+ /* Space stages. */
+ System.out.println();
+ System.out.println();
+
+ /* Shunt infix tokens to postfix tokens. */
+ final Token[] postfixTokens = Shunter.shuntTokens(infixTokens);
+ System.out.println("Lexed tokens: ");
+ for (final Token tok : postfixTokens) {
+ System.out.println("\t" + tok);
+ }
+
+ /* Print out postfix tokens. */
+ System.out.print("Shunted expression: ");
+ for (final Token tok : postfixTokens) {
+ System.out.print(tok.toExpr() + " ");
+ }
+
+ /* Space stages. */
+ System.out.println();
+ System.out.println();
+
+ /* Construct a list from the array of tokens. */
+ final FunctionalList<Token> tokList = new FunctionalList<>(
+ Arrays.asList(postfixTokens));
+
+ /* Construct a tree from the list of postfixed tokens. */
+ final ITree<Token> ast = TreeConstructor.constructTree(tokList,
+ tok -> tok.typ.isOperator);
+
+ /* Print the tree, then the canonical expression for it. */
+ System.out.println("Parsed tree");
+ System.out.println(ast.toString());
+ System.out.println("\nCanonical expr: " + toCanonicalExpr(ast));
+
+ /* Space stages. */
+ System.out.println();
+ System.out.println();
+
+ /* Prompt for a new expression. */
+ System.out.print("Enter a math expression (blank line to quit): ");
+ /* Read it. */
+ ln = scan.nextLine().trim();
+ }
+
+ /* Cleanup after ourselves. */
+ scan.close();
+ }
+
+ /*
+ * Convert an expression to one that uses the smallest necessary amount
+ * of parens.
+ */
+ private static String toCanonicalExpr(final ITree<Token> ast) {
+ final Token data = ast.getHead();
+
+ if (ast.getChildrenCount() == 0) {
+ /* Handle leaf nodes. */
+ return data.toExpr();
+ }
+
+ /* The left/right children. */
+ final ITree<Token> left = ast.getChild(0);
+ final ITree<Token> right = ast.getChild(1);
+
+ /* Recursively canonicalize them. */
+ String leftExpr = toCanonicalExpr(left);
+ String rightExpr = toCanonicalExpr(right);
+
+ /* Add parens if the left was higher priority. */
+ if (left.getChildrenCount() == 0) {
+ int leftPriority = left.getHead().typ.operatorPriority;
+ int dataPriority = data.typ.operatorPriority;
+
+ if (leftPriority >= dataPriority) {
+ leftExpr = String.format("(%s)", leftExpr);
+ }
+ }
+
+ /* Add parens if the right was higher priority. */
+ if (right.getChildrenCount() == 0) {
+ int rightPriority = right.getHead().typ.operatorPriority;
+ int dataPriority = data.typ.operatorPriority;
+
+ if (rightPriority >= dataPriority) {
+ rightExpr = String.format("(%s)", rightExpr);
+ }
+ }
+
+ return String.format("%s %s %s", leftExpr, data.toExpr(), rightExpr);
+ }
+}
diff --git a/base/src/bjc/dicelang/expr/Shunter.java b/base/src/bjc/dicelang/expr/Shunter.java
new file mode 100644
index 0000000..213e473
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/Shunter.java
@@ -0,0 +1,110 @@
+package bjc.dicelang.expr;
+
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Converts a infix series of tokens into a prefix series of tokens.
+ *
+ * @author Ben Culkin
+ */
+public class Shunter {
+ /*
+ * @NOTE
+ * Why does this method return an array, and not the list of
+ * tokens?
+ */
+ /**
+ * Convert a infix series of tokens to a postfix series of tokens.
+ *
+ * @param infixTokens
+ * The tokens in infix order.
+ *
+ * @return The tokens in postfix order.
+ */
+ public static Token[] shuntTokens(final Token[] infixTokens) {
+ /* The returned tokens. */
+ final List<Token> postfixTokens = new ArrayList<>(infixTokens.length);
+
+ /* The current stack of operators. */
+ final Deque<Token> opStack = new LinkedList<>();
+
+ /* Shunt each token. */
+ for (final Token tok : infixTokens) {
+ /* Handle operators. */
+ if (tok.typ.isOperator) {
+ Token curOp = opStack.peek();
+
+ /*
+ * Check if an operator is higher priority,
+ * respecting their left associativity.
+ *
+ * @NOTE
+ * Should this be factored out into a
+ * method?
+ */
+ int leftPriority = tok.typ.operatorPriority;
+ int rightPriority;
+
+ if (curOp == null) {
+ rightPriority = 0;
+ } else {
+ rightPriority = curOp.typ.operatorPriority;
+ }
+
+ boolean isHigherPrec = leftPriority >= rightPriority;
+
+ /*
+ * Pop all operators that are lower precedence
+ * than us.
+ */
+ while (!opStack.isEmpty() && isHigherPrec) {
+ postfixTokens.add(opStack.pop());
+ curOp = opStack.peek();
+
+ leftPriority = tok.typ.operatorPriority;
+ if (curOp == null) {
+ rightPriority = 0;
+ } else {
+ rightPriority = curOp.typ.operatorPriority;
+ }
+
+ isHigherPrec = leftPriority >= rightPriority;
+ }
+
+ opStack.push(tok);
+ } else if (tok.typ == TokenType.OPAREN) {
+ opStack.push(tok);
+ } else if (tok.typ == TokenType.CPAREN) {
+ Token curOp = opStack.peek();
+
+ /*
+ * Pop things until we find the matching
+ * parenthesis.
+ */
+ while (curOp.typ != TokenType.OPAREN) {
+ final Token tk = opStack.pop();
+ postfixTokens.add(tk);
+ curOp = opStack.peek();
+ }
+
+ if (!opStack.isEmpty()) {
+ opStack.pop();
+ }
+ } else {
+ postfixTokens.add(tok);
+ }
+ }
+
+ /*
+ * Flush remaining operators.
+ */
+ while (!opStack.isEmpty()) {
+ postfixTokens.add(opStack.pop());
+ }
+
+ return postfixTokens.toArray(new Token[0]);
+ }
+}
diff --git a/base/src/bjc/dicelang/expr/Token.java b/base/src/bjc/dicelang/expr/Token.java
new file mode 100644
index 0000000..bf92f97
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/Token.java
@@ -0,0 +1,88 @@
+package bjc.dicelang.expr;
+
+/*
+ * @TODO 10/08/17 :TokenReorg
+ * I am not a fan of this 'having a bunch of subclasses' in one thing I
+ * seem to have been doing around this project. This should be multiple
+ * subclasses, one for each value for TokenType.
+ */
+/**
+ * Represents a lexical token.
+ *
+ * @author Ben Culkin
+ */
+public class Token {
+ /* The state for this token. */
+ private final Tokens tks;
+
+ /**
+ * The type of the token.
+ *
+ * Determines which fields have a value.
+ */
+ public final TokenType typ;
+
+ /** The integer value attached to this token. */
+ public int intValue;
+
+ /** The original string this token was part of. */
+ public String rawValue;
+
+ /**
+ * Create a new token.
+ *
+ * @param type
+ * The type of this token.
+ *
+ * @param raw
+ * The string this token came from.
+ *
+ * @param toks
+ * The state for this token
+ */
+ public Token(final TokenType type, final String raw, final Tokens toks) {
+ this.typ = type;
+
+ rawValue = raw;
+ tks = toks;
+ }
+
+ @Override
+ public String toString() {
+ String typeStr = typ.toString();
+ typeStr = String.format("%s (%s)", typeStr, typ.name());
+
+ if (typ == TokenType.VREF) {
+ typeStr += " (ind. " + intValue;
+ typeStr += ", sym. \"" + tks.symbolTable.get(intValue) + "\")";
+ }
+
+ return String.format("%s (originally from: %s)", typeStr, rawValue);
+ }
+
+ /**
+ * Convert this token into the string representation of it.
+ *
+ * @return The string representation of it.
+ */
+ public String toExpr() {
+ switch (typ) {
+ case ADD:
+ return "+";
+ case SUBTRACT:
+ return "-";
+ case MULTIPLY:
+ return "*";
+ case DIVIDE:
+ return "/";
+ case VREF:
+ return tks.symbolTable.get(intValue);
+ case OPAREN:
+ return "(";
+ case CPAREN:
+ return ")";
+ default:
+ return "???";
+ }
+ }
+}
diff --git a/base/src/bjc/dicelang/expr/TokenType.java b/base/src/bjc/dicelang/expr/TokenType.java
new file mode 100644
index 0000000..ad01917
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/TokenType.java
@@ -0,0 +1,56 @@
+package bjc.dicelang.expr;
+
/**
 * Represents the type of this token.
 *
 * @author Ben Culkin
 */
public enum TokenType {
	/*
	 * @NOTE
	 * Do we want to switch to auto-numbering the tokens? They were
	 * manually numbered because this was an assignment for PoPL and
	 * that was what Dr. Naz wanted.
	 */

	/** Represents + */
	ADD(14, true, 0),
	/** Represents - */
	SUBTRACT(15, true, 0),
	/** Represents * */
	MULTIPLY(16, true, 1),
	/** Represents / */
	DIVIDE(17, true, 1),

	/** Represents variable names. */
	VREF(11),

	/** Represents ( */
	OPAREN(0, false, 100),
	/** Represents ) */
	CPAREN(0, false, 100);

	/** The ID number for this token type. */
	public final int nVal;

	/** Whether or not this type of token is an operator. */
	public final boolean isOperator;
	/** The priority of this operator, if it is one. */
	public final int operatorPriority;

	/* Create a new token type. */
	private TokenType(final int id, final boolean operator, final int priority) {
		nVal = id;

		isOperator = operator;
		operatorPriority = priority;
	}

	/* Create a new non-operator token type. */
	private TokenType(final int id) {
		this(id, false, -1);
	}

	@Override
	public String toString() {
		return String.valueOf(nVal);
	}
}
diff --git a/base/src/bjc/dicelang/expr/Tokens.java b/base/src/bjc/dicelang/expr/Tokens.java
new file mode 100644
index 0000000..287d2b4
--- /dev/null
+++ b/base/src/bjc/dicelang/expr/Tokens.java
@@ -0,0 +1,96 @@
+package bjc.dicelang.expr;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Contains per-instance state for token parsing.
+ *
+ * @author EVE
+ *
+ */
+public class Tokens {
+ /* Contains mappings from variable references to string names. */
+ private final Map<Integer, String> symTab;
+ /* Reverse index into the symbol table. */
+ private final Map<String, Integer> revSymTab;
+
+ /** Read-only view on the symbol table. */
+ public final Map<Integer, String> symbolTable;
+
+ /* Next index into the symbol table. */
+ private int nextSym;
+
+ /* Mapping from literal tokens to token types. */
+ private final Map<String, TokenType> litTokens;
+
+ /** Create a new set of tokens. */
+ public Tokens() {
+ /* Create tables. */
+ symTab = new HashMap<>();
+ revSymTab = new HashMap<>();
+
+ /* Init public view. */
+ symbolTable = Collections.unmodifiableMap(symTab);
+
+ /* Set sym ID. */
+ nextSym = 0;
+
+ /*
+ * Setup literal mappings.
+ *
+ * @NOTE
+ * Should this be a static member?
+ */
+ litTokens = new HashMap<>();
+ litTokens.put("+", TokenType.ADD);
+ litTokens.put("-", TokenType.SUBTRACT);
+ litTokens.put("*", TokenType.MULTIPLY);
+ litTokens.put("/", TokenType.DIVIDE);
+ litTokens.put("(", TokenType.OPAREN);
+ litTokens.put(")", TokenType.CPAREN);
+ }
+
+ /**
+ * Convert the string representation of a token into a token.
+ *
+ * @param tok
+ * The string representation of the token.
+ * @param raw
+ * The original string the token came from.
+ *
+ * @return The token the string represents.
+ */
+ public Token lexToken(final String tok, final String raw) {
+ if (litTokens.containsKey(tok)) {
+ /* Return matching literal token. */
+ return new Token(litTokens.get(tok), raw, this);
+ }
+
+ /* Its a variable reference. */
+ return parseVRef(tok, raw);
+ }
+
+ /* Parse a variable reference. */
+ private Token parseVRef(final String tok, final String raw) {
+ final Token tk = new Token(TokenType.VREF, raw, this);
+
+ if (revSymTab.containsKey(tok)) {
+ /* Reuse the entry if it exists. */
+ tk.intValue = revSymTab.get(tok);
+ } else {
+ /* Create a new entry. */
+ tk.intValue = nextSym;
+
+ /* Record it. */
+ symTab.put(nextSym, tok);
+ revSymTab.put(tok, nextSym);
+
+ /* Next ID. */
+ nextSym += 1;
+ }
+
+ return tk;
+ }
+}
diff --git a/base/src/bjc/dicelang/scl/StreamControlConsole.java b/base/src/bjc/dicelang/scl/StreamControlConsole.java
new file mode 100644
index 0000000..649c6fa
--- /dev/null
+++ b/base/src/bjc/dicelang/scl/StreamControlConsole.java
@@ -0,0 +1,75 @@
+package bjc.dicelang.scl;
+
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.funcdata.IList;
+
+import java.util.Iterator;
+import java.util.Scanner;
+
+import java.util.function.Supplier;
+
+/**
+ * Implement a SCL REPL
+ *
+ * @author Ben Culkin
+ */
+public class StreamControlConsole {
+	/*
+	 * @TODO 10/08/17 :SCLArgs
+	 * Do something useful with the CLI args.
+	 *
+	 */
+	/**
+	 * Main method. Runs a simple read-eval-print loop over SCL command
+	 * strings until a blank line (or EOF) is entered.
+	 *
+	 * @param args
+	 *             Unused CLI args.
+	 */
+	public static void main(String[] args) {
+		/*
+		 * Initialize vars.
+		 *
+		 * We can get away with passing the null, because StreamEngine
+		 * doesn't reference any parts of DiceLangEngine.
+		 */
+		StreamEngine sengine = new StreamEngine(null);
+		StreamControlEngine sclengine = new StreamControlEngine(sengine);
+
+		/* Close the scanner (and stdin) when the REPL exits. */
+		try (Scanner scn = new Scanner(System.in)) {
+			/* Get input from the user. */
+			System.out.print("Enter a SCL command string (blank to exit): ");
+
+			/* Process it. */
+			while (scn.hasNextLine()) {
+				String ln = scn.nextLine().trim();
+
+				if (ln.equals("")) {
+					/* A blank line ends the REPL. */
+					break;
+				}
+
+				/* Break the line into whitespace-separated tokens. */
+				IList<String> res = new FunctionalList<>();
+				String[] tokens = ln.split(" ");
+
+				/* Run the stream engine on the tokens. */
+				boolean succ = sengine.doStreams(tokens, res);
+				if (!succ) {
+					System.out.printf("ERROR: Stream engine failed for line '%s'\n", ln);
+					continue;
+				}
+
+				/* Run the command through SCL. */
+				tokens = res.toArray(new String[res.getSize()]);
+				succ = sclengine.runProgram(tokens);
+				if (!succ) {
+					System.out.printf("ERROR: SCL engine failed for line '%s'\n", ln);
+					continue;
+				}
+
+				/* Prompt again. */
+				System.out.print("Command string executed successfully.\n\n");
+				System.out.print("Enter a SCL command string (blank to exit): ");
+			}
+		}
+	}
+}
diff --git a/base/src/bjc/dicelang/scl/StreamControlEngine.java b/base/src/bjc/dicelang/scl/StreamControlEngine.java
new file mode 100644
index 0000000..d5e8b72
--- /dev/null
+++ b/base/src/bjc/dicelang/scl/StreamControlEngine.java
@@ -0,0 +1,546 @@
+package bjc.dicelang.scl;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import bjc.dicelang.Errors;
+import bjc.utils.esodata.SimpleStack;
+import bjc.utils.esodata.Stack;
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.funcdata.IList;
+import bjc.utils.parserutils.TokenUtils;
+
+import static bjc.dicelang.Errors.ErrorKey.*;
+import static bjc.dicelang.scl.StreamControlEngine.Token.Type.*;
+
+/*
+ * @TODO 10/08/17 Ben Culkin :SCLReorg
+ * This is a large enough class that it should maybe be split into
+ * subclasses.
+ */
+/**
+ * Runs a Stream Control Language (SCL) program.
+ *
+ * SCL is a stack-based concatenative language based mostly off of Postscript
+ * and Factor, with inspiration from various other languages.
+ *
+ * @author Ben Culkin
+ */
+public class StreamControlEngine {
+	/*
+	 * @TODO 10/08/17 Ben Culkin :TokenSplit
+	 * Again with the multiple subclasses in one class. Split it so
+	 * that each subclass only has the fields it needs.
+	 */
+	/** A single SCL token; which value field is live depends on its type. */
+	public static class Token {
+		/** The possible types of a SCL token. */
+		public static enum Type {
+			/* Natural tokens. These come directly from strings */
+			ILIT, FLIT, BLIT, SQUOTE, DQUOTE, OBRACKET, OBRACE, SYMBOL, WORD,
+
+			/* Synthetic tokens. These are produced from special tokens. */
+			SLIT, WORDS, ARRAY,
+
+			/* Word tokens. These are subordinate to WORD tokens */
+			/*
+			 * @NOTE
+			 * These should really be in their own enum.
+			 */
+			/* Array manipulation */
+			MAKEARRAY, MAKEEXEC, MAKEUNEXEC,
+			/* Stream manipulation */
+			NEWSTREAM, LEFTSTREAM, RIGHTSTREAM, DELETESTREAM, MERGESTREAM,
+			/* Stack manipulation */
+			STACKCOUNT, STACKEMPTY, DROP, NDROP, NIP, NNIP,
+		}
+
+		/* The type of this token */
+		public Type type;
+
+		/* Used for ILIT */
+		public long intVal;
+		/* Used for FLIT */
+		public double floatVal;
+		/* Used for BLIT */
+		public boolean boolVal;
+		/* Used for SYMBOL & SLIT */
+		public String stringVal;
+		/* Used for WORD */
+		public Token tokenVal;
+		/* Used for WORDS & ARRAY */
+		public IList<Token> tokenVals;
+
+		/* Create a new token. */
+		public Token(final Type typ) {
+			type = typ;
+		}
+
+		/* Create a new integer-valued token. */
+		public Token(final Type typ, final long iVal) {
+			this(typ);
+
+			intVal = iVal;
+		}
+
+		/* Create a new float-valued token. */
+		public Token(final Type typ, final double dVal) {
+			this(typ);
+
+			floatVal = dVal;
+		}
+
+		/* Create a new boolean-valued token. */
+		public Token(final Type typ, final boolean bVal) {
+			this(typ);
+
+			boolVal = bVal;
+		}
+
+		/* Create a new string-valued token. */
+		public Token(final Type typ, final String sVal) {
+			this(typ);
+
+			stringVal = sVal;
+		}
+
+		/* Create a new token wrapping another token. */
+		public Token(final Type typ, final Token tVal) {
+			this(typ);
+
+			tokenVal = tVal;
+		}
+
+		/* Create a new token wrapping a fresh token of the given type. */
+		public Token(final Type typ, final Token.Type tVal) {
+			this(typ, new Token(tVal));
+		}
+
+		/* Create a new token holding a list of tokens. */
+		public Token(final Type typ, final IList<Token> tVals) {
+			this(typ);
+
+			tokenVals = tVals;
+		}
+
+		/*
+		 * Convert a string into a token.
+		 *
+		 * Returns null (after reporting an error) when the string is
+		 * not a valid token.
+		 */
+		public static Token tokenizeString(final String token) {
+			if (litTokens.containsKey(token)) {
+				return new Token(litTokens.get(token));
+			} else if (token.startsWith("\\")) {
+				/* A leading backslash marks a symbol. */
+				return new Token(SYMBOL, token.substring(1));
+			} else if (builtinWords.containsKey(token)) {
+				return new Token(WORD, builtinWords.get(token));
+			} else if (token.equals("true")) {
+				return new Token(BLIT, true);
+			} else if (token.equals("false")) {
+				return new Token(BLIT, false);
+			} else if (TokenUtils.isInt(token)) {
+				return new Token(ILIT, Long.parseLong(token));
+			} else if (TokenUtils.isDouble(token)) {
+				return new Token(FLIT, Double.parseDouble(token));
+			} else {
+				Errors.inst.printError(EK_SCL_INVTOKEN, token);
+				return null;
+			}
+		}
+
+		/* The literal tokens. */
+		private static final Map<String, Token.Type> litTokens;
+		/* The builtin words. */
+		private static final Map<String, Token.Type> builtinWords;
+
+		static {
+			/* Init literal tokens. */
+			litTokens = new HashMap<>();
+
+			litTokens.put("'", SQUOTE);
+			litTokens.put("\"", DQUOTE);
+			litTokens.put("[", OBRACKET);
+			litTokens.put("{", OBRACE);
+
+			/* Init builtin words. */
+			builtinWords = new HashMap<>();
+
+			builtinWords.put("makearray", MAKEARRAY);
+			builtinWords.put("+stream", NEWSTREAM);
+			/*
+			 * @NOTE(review)
+			 * ">stream" mapping to LEFTSTREAM (and "<stream" to
+			 * RIGHTSTREAM) looks swapped — confirm intended
+			 * direction before changing.
+			 */
+			builtinWords.put(">stream", LEFTSTREAM);
+			builtinWords.put("<stream", RIGHTSTREAM);
+			builtinWords.put("-stream", DELETESTREAM);
+			builtinWords.put("<-stream", MERGESTREAM);
+			builtinWords.put("cvx", MAKEEXEC);
+			builtinWords.put("cvux", MAKEUNEXEC);
+			builtinWords.put("#", STACKCOUNT);
+			builtinWords.put("empty?", STACKEMPTY);
+			builtinWords.put("drop", DROP);
+			builtinWords.put("ndrop", NDROP);
+			builtinWords.put("nip", NIP);
+			builtinWords.put("nnip", NNIP);
+		}
+	}
+
+	/* The stream engine we're hooked to. */
+	private final StreamEngine eng;
+
+	/* The current stack state. */
+	private final Stack<Token> curStack;
+
+	/* Map of user defined words. NOTE(review): not yet used in this class. */
+	private final Map<String, Token> words;
+
+	/**
+	 * Create a new stream control engine.
+	 *
+	 * @param engine
+	 *                The engine to control.
+	 */
+	public StreamControlEngine(final StreamEngine engine) {
+		eng = engine;
+
+		words = new HashMap<>();
+		curStack = new SimpleStack<>();
+	}
+
+	/**
+	 * Run a SCL program.
+	 *
+	 * @param tokens
+	 *                The program to run.
+	 *
+	 * @return Whether the program executed successfully.
+	 */
+	public boolean runProgram(final String[] tokens) {
+		for (int i = 0; i < tokens.length; i++) {
+			/* Tokenize each token. */
+			final String token = tokens[i];
+			final Token tok = Token.tokenizeString(token);
+
+			if (tok == null) {
+				System.out.printf("ERROR: Tokenization failed for '%s'\n", token);
+				return false;
+			}
+
+			/*
+			 * Handle token types.
+			 *
+			 * The delimiter handlers return the index of the
+			 * closing delimiter; the loop increment then moves
+			 * past it.
+			 */
+			switch (tok.type) {
+			case SQUOTE:
+				/* Handle single-quotes. */
+				i = handleSingleQuote(i, tokens);
+				if (i == -1) {
+					return false;
+				}
+				break;
+			case OBRACKET:
+				/* Handle delimited brackets. */
+				i = handleDelim(i, tokens, "]");
+				if (i == -1) {
+					return false;
+				}
+				break;
+			case OBRACE:
+				/* Handle delimited braces. */
+				i = handleDelim(i, tokens, "}");
+				if (i == -1) {
+					return false;
+				}
+				/* Braces yield a plain (unexecutable) array. */
+				final Token brak = curStack.pop();
+				curStack.push(new Token(ARRAY, brak.tokenVals));
+				break;
+			case WORD:
+				/* Handle words. */
+				if (!handleWord(tok)) {
+					/*
+					 * Report the word's source text; Token
+					 * has no useful toString().
+					 */
+					System.out.printf("WARNING: Execution of word '%s' failed\n", token);
+				}
+				break;
+			default:
+				/* Put it onto the stack. */
+				curStack.push(tok);
+				break;
+			}
+		}
+
+		return true;
+	}
+
+	/* Execute a single builtin word; returns false on failure. */
+	private boolean handleWord(final Token tk) {
+		boolean succ = true;
+
+		/* Handle each type of word. */
+		/*
+		 * @NOTE
+		 * This should probably use something other than a switch
+		 * statement.
+		 */
+		switch (tk.tokenVal.type) {
+		case NEWSTREAM:
+			eng.newStream();
+			break;
+		case LEFTSTREAM:
+			succ = eng.leftStream();
+			if (!succ) {
+				return false;
+			}
+			break;
+		case RIGHTSTREAM:
+			succ = eng.rightStream();
+			if (!succ) {
+				return false;
+			}
+			break;
+		case DELETESTREAM:
+			succ = eng.deleteStream();
+			if (!succ) {
+				return false;
+			}
+			break;
+		case MERGESTREAM:
+			succ = eng.mergeStream();
+			if (!succ) {
+				return false;
+			}
+			break;
+		case MAKEARRAY:
+			succ = makeArray();
+			if (!succ) {
+				return false;
+			}
+			break;
+		case MAKEEXEC:
+			succ = toggleExec(true);
+			if (!succ) {
+				return false;
+			}
+			break;
+		case MAKEUNEXEC:
+			succ = toggleExec(false);
+			if (!succ) {
+				return false;
+			}
+			break;
+		case STACKCOUNT:
+			curStack.push(new Token(ILIT, curStack.size()));
+			break;
+		case STACKEMPTY:
+			curStack.push(new Token(BLIT, curStack.empty()));
+			break;
+		case DROP:
+			if (curStack.size() == 0) {
+				Errors.inst.printError(EK_SCL_SUNDERFLOW, tk.tokenVal.type.toString());
+				return false;
+			}
+			curStack.drop();
+			break;
+		case NDROP:
+			succ = handleNDrop();
+			if (!succ) {
+				return false;
+			}
+			break;
+		case NIP:
+			if (curStack.size() < 2) {
+				Errors.inst.printError(EK_SCL_SUNDERFLOW, tk.tokenVal.type.toString());
+				return false;
+			}
+			curStack.nip();
+			break;
+		case NNIP:
+			succ = handleNNip();
+			if (!succ) {
+				return false;
+			}
+			break;
+		default:
+			Errors.inst.printError(EK_SCL_UNWORD, tk.tokenVal.type.toString());
+			return false;
+		}
+
+		return true;
+	}
+
+	/* Handle nipping a specified number of items. */
+	private boolean handleNNip() {
+		/* The count itself must be on the stack. */
+		if (curStack.empty()) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, NNIP.toString());
+			return false;
+		}
+
+		final Token num = curStack.pop();
+
+		if (num.type != ILIT) {
+			Errors.inst.printError(EK_SCL_INVARG, num.type.toString());
+			return false;
+		}
+
+		final int n = (int) num.intVal;
+
+		if (curStack.size() < n) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, NNIP.toString());
+			return false;
+		}
+
+		curStack.nip(n);
+		return true;
+	}
+
+	/* Handle dropping a specified number of items. */
+	private boolean handleNDrop() {
+		/* The count itself must be on the stack. */
+		if (curStack.empty()) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, NDROP.toString());
+			return false;
+		}
+
+		final Token num = curStack.pop();
+
+		if (num.type != ILIT) {
+			Errors.inst.printError(EK_SCL_INVARG, num.type.toString());
+			return false;
+		}
+
+		final int n = (int) num.intVal;
+
+		if (curStack.size() < n) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, NDROP.toString());
+			return false;
+		}
+
+		curStack.drop(n);
+		return true;
+	}
+
+	/* Handle toggling the executable flag on the top-of-stack array. */
+	private boolean toggleExec(final boolean exec) {
+		/* There must be something on the stack to toggle. */
+		if (curStack.empty()) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, exec ? MAKEEXEC.toString() : MAKEUNEXEC.toString());
+			return false;
+		}
+
+		final Token top = curStack.top();
+
+		if (exec) {
+			if (top.type != ARRAY) {
+				Errors.inst.printError(EK_SCL_INVARG, top.toString());
+				return false;
+			}
+
+			top.type = WORDS;
+		} else {
+			if (top.type != WORDS) {
+				Errors.inst.printError(EK_SCL_INVARG, top.toString());
+				return false;
+			}
+
+			top.type = ARRAY;
+		}
+
+		return true;
+	}
+
+	/* Handle creating an array from the top N stack items. */
+	private boolean makeArray() {
+		/* The count itself must be on the stack. */
+		if (curStack.empty()) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, MAKEARRAY.toString());
+			return false;
+		}
+
+		final Token num = curStack.pop();
+
+		if (num.type != ILIT) {
+			Errors.inst.printError(EK_SCL_INVARG, num.type.toString());
+			/* Was missing: don't build an array from a bad count. */
+			return false;
+		}
+
+		final int n = (int) num.intVal;
+
+		/* There must be enough items to fill the array. */
+		if (curStack.size() < n) {
+			Errors.inst.printError(EK_SCL_SUNDERFLOW, MAKEARRAY.toString());
+			return false;
+		}
+
+		final IList<Token> arr = new FunctionalList<>();
+
+		for (int i = 0; i < n; i++) {
+			arr.add(curStack.pop());
+		}
+
+		curStack.push(new Token(ARRAY, arr));
+
+		return true;
+	}
+
+	/*
+	 * Handle a delimited series of tokens, starting at the opening
+	 * delimiter at index i.
+	 *
+	 * Returns the index of the CLOSING delimiter (the caller's increment
+	 * moves past it), or -1 on error. Previously this returned the index
+	 * past the closing delimiter, which made every caller skip one token.
+	 */
+	private int handleDelim(final int i, final String[] tokens, final String delim) {
+		final IList<Token> toks = new FunctionalList<>();
+
+		int n = i + 1;
+
+		if (n >= tokens.length) {
+			Errors.inst.printError(EK_SCL_MMQUOTE);
+			return -1;
+		}
+
+		String tok = tokens[n];
+
+		while (!tok.equals(delim)) {
+			final Token ntok = Token.tokenizeString(tok);
+
+			if (ntok == null) {
+				/* Tokenization failed; error already reported. */
+				return -1;
+			}
+
+			switch (ntok.type) {
+			case SQUOTE:
+				n = handleSingleQuote(n, tokens);
+				if (n == -1) {
+					return -1;
+				}
+				toks.add(curStack.pop());
+				break;
+			case OBRACKET:
+				n = handleDelim(n, tokens, "]");
+				if (n == -1) {
+					return -1;
+				}
+				toks.add(curStack.pop());
+				break;
+			case OBRACE:
+				/* Was recursing with i, rescanning from the start. */
+				n = handleDelim(n, tokens, "}");
+				if (n == -1) {
+					return -1;
+				}
+				final Token brak = curStack.pop();
+				toks.add(new Token(ARRAY, brak.tokenVals));
+				break;
+			default:
+				toks.add(ntok);
+			}
+
+			/* Move to the next token */
+			n += 1;
+
+			if (n >= tokens.length) {
+				Errors.inst.printError(EK_SCL_MMQUOTE);
+				return -1;
+			}
+
+			tok = tokens[n];
+		}
+
+		/* @NOTE
+		 * Instead of being hardcoded, this should be a parameter.
+		 */
+		curStack.push(new Token(WORDS, toks));
+
+		/* n is the index of the closing delimiter. */
+		return n;
+	}
+
+	/*
+	 * Handle a single-quoted string, starting at the opening quote at
+	 * index i.
+	 *
+	 * Returns the index of the CLOSING quote (the caller's increment
+	 * moves past it), or -1 on error.
+	 */
+	private int handleSingleQuote(final int i, final String[] tokens) {
+		final StringBuilder sb = new StringBuilder();
+
+		int n = i + 1;
+
+		if (n >= tokens.length) {
+			Errors.inst.printError(EK_SCL_MMQUOTE);
+			return -1;
+		}
+
+		String tok = tokens[n];
+
+		while (!tok.equals("'")) {
+			if (tok.matches("\\\\+'")) {
+				/* Handle escaped quotes. */
+				sb.append(tok.substring(1));
+			} else {
+				sb.append(tok);
+			}
+
+			/* Move to the next token */
+			n += 1;
+
+			if (n >= tokens.length) {
+				Errors.inst.printError(EK_SCL_MMQUOTE);
+				return -1;
+			}
+
+			tok = tokens[n];
+		}
+
+		curStack.push(new Token(SLIT, TokenUtils.descapeString(sb.toString())));
+
+		/* n is the index of the closing quote. */
+		return n;
+	}
+}
diff --git a/base/src/bjc/dicelang/scl/StreamEngine.java b/base/src/bjc/dicelang/scl/StreamEngine.java
new file mode 100644
index 0000000..6e970b7
--- /dev/null
+++ b/base/src/bjc/dicelang/scl/StreamEngine.java
@@ -0,0 +1,266 @@
+package bjc.dicelang.scl;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_STRM_INVCOM;
+import static bjc.dicelang.Errors.ErrorKey.EK_STRM_LAST;
+import static bjc.dicelang.Errors.ErrorKey.EK_STRM_NONEX;
+
+import static java.util.logging.Level.*;
+
+import bjc.dicelang.DiceLangEngine;
+import bjc.dicelang.Errors;
+import bjc.utils.esodata.SingleTape;
+import bjc.utils.esodata.Tape;
+import bjc.utils.esodata.TapeLibrary;
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.funcdata.IList;
+import bjc.utils.funcutils.ListUtils;
+
+import java.util.Arrays;
+
+/**
+ * Implements multiple interleaved parse streams, as well as a command language
+ * for the streams.
+ *
+ * The idea for the interleaved streams came from the language Oozylbub &amp;
+ * Murphy, but the command language was my own idea.
+ *
+ * @author Ben Culkin
+ */
+public class StreamEngine {
+	/** Whether or not we're doing debugging. */
+	public final boolean debug = true;
+
+	/* The engine we're attached to. May be null (see StreamControlConsole). */
+	DiceLangEngine eng;
+
+	/* Our streams: a tape of token lists, plus the one we're appending to. */
+	Tape<IList<String>> streams;
+	IList<String> currStream;
+
+	/* Saved streams. NOTE(review): initialized but never read in this class — confirm. */
+	TapeLibrary<IList<String>> savedStreams;
+
+	/* Handler for SCL programs (run by the 'L' stream command). */
+	private final StreamControlEngine scleng;
+
+	/**
+	 * Create a new stream engine.
+	 *
+	 * @param engine
+	 *                The dice engine we're attached to.
+	 */
+	public StreamEngine(final DiceLangEngine engine) {
+		eng = engine;
+
+		savedStreams = new TapeLibrary<>();
+		scleng = new StreamControlEngine(this);
+	}
+
+	/* Do pre-run (re)initialization, so every doStreams call starts fresh. */
+	private void init() {
+		/* Reinitialize our list of streams. */
+		streams = new SingleTape<>();
+
+		/* Create an initial stream and make it current. */
+		currStream = new FunctionalList<>();
+		streams.insertBefore(currStream);
+	}
+
+	/**
+	 * Process a possibly interleaved set of streams.
+	 *
+	 * @param toks
+	 *                The raw tokens to read streams from.
+	 *
+	 * @param dest
+	 *                The list to write the final stream to.
+	 *
+	 * @return Whether or not the streams were successfully processed.
+	 */
+	public boolean doStreams(final String[] toks, final IList<String> dest) {
+		return doStreams(Arrays.asList(toks), dest);
+	}
+
+	/**
+	 * Process a possibly interleaved set of streams.
+	 *
+	 * Tokens starting with "{@literal {@S}" are stream commands; anything
+	 * else is appended to the current stream. "{@literal {@SQ}}" enters
+	 * quote mode (commands pass through as plain tokens) and
+	 * "{@literal {@SU}}" leaves it. Only the stream that is current when
+	 * input ends is copied to dest.
+	 *
+	 * @param toks
+	 *                The raw tokens to read streams from.
+	 *
+	 * @param dest
+	 *                The list to write the final stream to.
+	 *
+	 * @return Whether or not the streams were successfully processed.
+	 */
+	public boolean doStreams(final Iterable<String> toks, final IList<String> dest) {
+		/* Initialize per-run state. */
+		init();
+
+		/* Are we currently quoting things? */
+		boolean quoteMode = false;
+
+		/* Process each token. */
+		for (final String tk : toks) {
+			/* Process stream commands (unless quoting). */
+			if (tk.startsWith("{@S") && !quoteMode) {
+				if (tk.equals("{@SQ}")) {
+					/* Start quoting. */
+					quoteMode = true;
+				} else if (!processCommand(tk)) {
+					return false;
+				}
+			} else {
+				if (tk.equals("{@SU}")) {
+					/* Stop quoting. */
+					quoteMode = false;
+				} else if (tk.startsWith("\\") && tk.endsWith("{@SU}")) {
+					/* A backslash escapes a literal quote-end token. */
+					currStream.add(tk.substring(1));
+				} else {
+					currStream.add(tk);
+				}
+			}
+		}
+
+		for (final String tk : currStream) {
+			/* Collect tokens from the current stream. */
+			dest.add(tk);
+		}
+
+		return true;
+	}
+
+	/** Create a new stream, inserted after the current one. */
+	public void newStream() {
+		streams.insertAfter(new FunctionalList<>());
+	}
+
+	/**
+	 * Move to a stream to the right.
+	 *
+	 * @return Whether or not the move was successful.
+	 */
+	public boolean rightStream() {
+		if (!streams.right()) {
+			Errors.inst.printError(EK_STRM_NONEX);
+			return false;
+		}
+
+		currStream = streams.item();
+		return true;
+	}
+
+	/**
+	 * Move to a stream to the left.
+	 *
+	 * @return Whether or not the move was successful.
+	 */
+	public boolean leftStream() {
+		if (!streams.left()) {
+			Errors.inst.printError(EK_STRM_NONEX);
+			return false;
+		}
+
+		currStream = streams.item();
+		return true;
+	}
+
+	/**
+	 * Delete the current stream. Fails if it is the only stream left.
+	 *
+	 * @return Whether or not the delete succeeded.
+	 */
+	public boolean deleteStream() {
+		if (streams.size() == 1) {
+			Errors.inst.printError(EK_STRM_LAST);
+			return false;
+		}
+
+		streams.remove();
+		currStream = streams.item();
+
+		return true;
+	}
+
+	/**
+	 * Merge the current stream into the previous stream.
+	 *
+	 * The current stream's tokens are collapsed into a single token
+	 * appended to the stream that becomes current. Fails if this is the
+	 * only stream left.
+	 *
+	 * @return Whether or not the merge succeeded.
+	 */
+	public boolean mergeStream() {
+		if (streams.size() == 1) {
+			Errors.inst.printError(EK_STRM_LAST);
+			return false;
+		}
+
+		final IList<String> stringLit = streams.remove();
+		currStream = streams.item();
+		currStream.add(ListUtils.collapseTokens(stringLit, " "));
+
+		return true;
+	}
+
+	/*
+	 * Process a single stream-command token of the form {@S<chars>}.
+	 *
+	 * Every character between the {@S prefix and the closing } is run as
+	 * a separate one-character command.
+	 */
+	private boolean processCommand(final String tk) {
+		char[] comms = null;
+
+		if (tk.length() > 5) {
+			/* Pull off {@S and closing } */
+			comms = tk.substring(3, tk.length() - 1).toCharArray();
+		} else {
+			/* Its a single char. command. */
+			comms = new char[1];
+			comms[0] = tk.charAt(3);
+		}
+
+		boolean succ;
+
+		/* Process each command. */
+		/*
+		 * @TODO 10/09/17 Ben Culkin :StreamCommands
+		 * This should probably be refactored in some way, so as to
+		 * make it easier to add new commands.
+		 */
+		for (final char comm : comms) {
+			switch (comm) {
+			case '+':
+				newStream();
+				break;
+			case '>':
+				succ = rightStream();
+				if (!succ) {
+					return false;
+				}
+				break;
+			case '<':
+				succ = leftStream();
+				if (!succ) {
+					return false;
+				}
+				break;
+			case '-':
+				succ = deleteStream();
+				if (!succ) {
+					return false;
+				}
+				break;
+			case 'M':
+				succ = mergeStream();
+				if (!succ) {
+					return false;
+				}
+				break;
+			case 'L':
+				/* Run the current stream as an SCL program. */
+				succ = scleng.runProgram(currStream.toArray(new String[0]));
+				if (!succ) {
+					return false;
+				}
+				break;
+			default:
+				Errors.inst.printError(EK_STRM_INVCOM, tk);
+				return false;
+			}
+		}
+
+		return true;
+	}
+}
diff --git a/base/src/bjc/dicelang/util/ResourceLoader.java b/base/src/bjc/dicelang/util/ResourceLoader.java
new file mode 100644
index 0000000..db5c6fc
--- /dev/null
+++ b/base/src/bjc/dicelang/util/ResourceLoader.java
@@ -0,0 +1,43 @@
+package bjc.dicelang.util;
+
+import static bjc.dicelang.Errors.ErrorKey.EK_MISC_IOEX;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import bjc.dicelang.Errors;
+
+/**
+ * Load resources bundled with DiceLang.
+ *
+ * @author EVE
+ *
+ */
+public class ResourceLoader {
+	/**
+	 * Loads a .help file from the data/help directory.
+	 *
+	 * @param name
+	 *             The name of the help file to load.
+	 *
+	 * @return The contents of the help file, or null if it could not be
+	 *         opened.
+	 */
+	public static String[] loadHelpFile(final String name) {
+		final URL fle = ResourceLoader.class.getResource("/data/help/" + name + ".help");
+
+		/* getResource returns null when the resource is missing. */
+		if (fle == null) {
+			Errors.inst.printError(EK_MISC_IOEX, "/data/help/" + name + ".help");
+			return null;
+		}
+
+		try {
+			final Path pth = Paths.get(fle.toURI());
+
+			/*
+			 * readAllLines closes the file itself; the previous
+			 * Files.lines() call leaked an open file handle.
+			 */
+			return Files.readAllLines(pth).toArray(new String[0]);
+		} catch (IOException | URISyntaxException ioex) {
+			Errors.inst.printError(EK_MISC_IOEX, fle.toString());
+		}
+
+		return null;
+	}
+}
diff --git a/base/todos.txt b/base/todos.txt
new file mode 100644
index 0000000..c83fe60
--- /dev/null
+++ b/base/todos.txt
@@ -0,0 +1,8 @@
+@TODO 10/08/17 Ben Culkin :ModuleSplitting
+ Split dice-lang into maven modules. Right now I'm thinking to split it
+ into the following modules
+ - dice (contains everything in the dice package)
+ - SCL (contains everything in the SCL package)
+ - lang (contains everything else)
+	This will make it easier to use these parts elsewhere, as well as
+	making compilation times shorter.