summaryrefslogtreecommitdiff
path: root/RGens/src/main/java/bjc/rgens/parser
diff options
context:
space:
mode:
author	student <student@student-OptiPlex-9020>	2017-03-17 10:49:27 -0400
committer	student <student@student-OptiPlex-9020>	2017-03-17 10:49:27 -0400
commit	0ea49dd4a52358f053c9be7138c392b16de05899 (patch)
tree	802e275aaf279480ee8626136f56bfa1fbab6845 /RGens/src/main/java/bjc/rgens/parser
parent	36cf3a0f0604ef43ce838ff6e9a7fc4e7c299522 (diff)
Move things around, and start on new parser.
Diffstat (limited to 'RGens/src/main/java/bjc/rgens/parser')
-rw-r--r--	RGens/src/main/java/bjc/rgens/parser/.DS_Store	bin 0 -> 6148 bytes
-rw-r--r--	RGens/src/main/java/bjc/rgens/parser/GrammarReaderApp.java	93
-rw-r--r--	RGens/src/main/java/bjc/rgens/parser/GrammarReaderCLI.java	61
-rw-r--r--	RGens/src/main/java/bjc/rgens/parser/PragmaErrorException.java	21
-rw-r--r--	RGens/src/main/java/bjc/rgens/parser/RBGrammarReader.java	319
-rw-r--r--	RGens/src/main/java/bjc/rgens/parser/ReaderState.java	306
6 files changed, 800 insertions, 0 deletions
diff --git a/RGens/src/main/java/bjc/rgens/parser/.DS_Store b/RGens/src/main/java/bjc/rgens/parser/.DS_Store
new file mode 100644
index 0000000..5008ddf
--- /dev/null
+++ b/RGens/src/main/java/bjc/rgens/parser/.DS_Store
Binary files differ
diff --git a/RGens/src/main/java/bjc/rgens/parser/GrammarReaderApp.java b/RGens/src/main/java/bjc/rgens/parser/GrammarReaderApp.java
new file mode 100644
index 0000000..b3beb81
--- /dev/null
+++ b/RGens/src/main/java/bjc/rgens/parser/GrammarReaderApp.java
@@ -0,0 +1,93 @@
+package bjc.rgens.parser;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintStream;
+
+import javax.swing.UIManager;
+import javax.swing.UnsupportedLookAndFeelException;
+
+import bjc.utils.funcutils.ListUtils;
+import bjc.utils.gen.WeightedGrammar;
+import bjc.utils.gui.SimpleDialogs;
+import bjc.utils.gui.awt.SimpleFileDialog;
+
+/**
+ * App that reads a grammar from a file and generates results
+ *
+ * @author ben
+ *
+ */
+public class GrammarReaderApp {
+ /**
+ * Main method of class
+ *
+ * @param args
+ * CLI args
+ */
+ public static void main(String[] args) {
+ try {
+ UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
+ } catch (ClassNotFoundException
+ | InstantiationException
+ | IllegalAccessException
+ | UnsupportedLookAndFeelException e) {
+ e.printStackTrace();
+ System.exit(1);
+ }
+
+ doSingleFile();
+ }
+
+ private static void doSingleFile() {
+ File gramFile = SimpleFileDialog.getOpenFile(null, "Choose Grammar File", ".gram");
+
+ WeightedGrammar<String> grammar = null;
+
+ try {
+ grammar = RBGrammarReader.fromPath(gramFile.toPath());
+ } catch (IOException ioex) {
+ ioex.printStackTrace();
+
+ System.exit(1);
+ }
+
+ String initRule = "";
+
+ if (!grammar.hasInitialRule()) {
+ grammar.getRuleNames().sort((leftString, rightString) -> {
+ return leftString.compareTo(rightString);
+ });
+
+ initRule = SimpleDialogs.getChoice(null,
+ "Pick a initial rule",
+ "Pick a initial rule to generate choices from",
+ grammar.getRuleNames().toArray(new String[0]));
+ } else {
+ initRule = grammar.getInitialRule();
+ }
+
+ int count = SimpleDialogs.getWhole(null,
+ "Enter number of repetitions",
+ "Enter the number of items to generate from the rule");
+
+ File outputFile = SimpleFileDialog.getSaveFile(null, "Choose Grammar File");
+
+ PrintStream outputStream = null;
+
+ try {
+ outputStream = new PrintStream(outputFile);
+ } catch (FileNotFoundException e) {
+ e.printStackTrace();
+ }
+
+ for (int i = 0; i < count; i++) {
+ String ruleResult = ListUtils.collapseTokens(grammar.generateListValues(initRule, " "));
+
+ outputStream.println(ruleResult.replaceAll("\\s+", " "));
+ }
+
+ outputStream.close();
+ }
+}
diff --git a/RGens/src/main/java/bjc/rgens/parser/GrammarReaderCLI.java b/RGens/src/main/java/bjc/rgens/parser/GrammarReaderCLI.java
new file mode 100644
index 0000000..b8eac35
--- /dev/null
+++ b/RGens/src/main/java/bjc/rgens/parser/GrammarReaderCLI.java
@@ -0,0 +1,61 @@
+package bjc.rgens.parser;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.file.Paths;
+
+import bjc.utils.funcutils.ListUtils;
+import bjc.utils.gen.WeightedGrammar;
+
+/**
+ * App that reads a grammar from a file and generates results
+ *
+ * @author ben
+ *
+ */
+public class GrammarReaderCLI {
+ private static WeightedGrammar<String> grammar = null;
+
+ /**
+ * Main application method
+ *
+ * @param args
+ * CLI args
+ */
+ public static void main(String[] args) {
+ if (args.length == 0) {
+ GrammarReaderApp.main(args);
+ } else {
+ String fName = args[0];
+
+ if (fName.equalsIgnoreCase("--help")) {
+ System.out.println(
+ "Usage: java -jar GrammarReader.jar <file-name> <init-rule> <num-res>");
+ System.exit(0);
+ }
+
+ String ruleName = args[1];
+
+ try (FileInputStream fStream = new FileInputStream(fName)) {
+ grammar = RBGrammarReader.fromPath(Paths.get(fName, ""));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ if (ruleName.equalsIgnoreCase("--list-rules")) {
+ grammar.getRuleNames().forEach(System.out::println);
+
+ System.exit(0);
+ }
+
+ int rCount = Integer.parseInt(args[2]);
+
+ for (int i = 0; i < rCount; i++) {
+ String ruleResult = ListUtils.collapseTokens(
+ grammar.generateListValues(ruleName, " "));
+
+ System.out.println(ruleResult.replaceAll("\\s+", " "));
+ }
+ }
+ }
+}
diff --git a/RGens/src/main/java/bjc/rgens/parser/PragmaErrorException.java b/RGens/src/main/java/bjc/rgens/parser/PragmaErrorException.java
new file mode 100644
index 0000000..05b71d3
--- /dev/null
+++ b/RGens/src/main/java/bjc/rgens/parser/PragmaErrorException.java
@@ -0,0 +1,21 @@
+package bjc.rgens.parser;
+
+/**
+ * Exception for error executing a pragma
+ *
+ * @author ben
+ *
+ */
+public class PragmaErrorException extends RuntimeException {
+ private static final long serialVersionUID = 7245421182038076899L;
+
+ /**
+ * Create a new exception with the given message
+ *
+ * @param message
+ * The message of the exception
+ */
+ public PragmaErrorException(String message) {
+ super(message);
+ }
+}
diff --git a/RGens/src/main/java/bjc/rgens/parser/RBGrammarReader.java b/RGens/src/main/java/bjc/rgens/parser/RBGrammarReader.java
new file mode 100644
index 0000000..7753d25
--- /dev/null
+++ b/RGens/src/main/java/bjc/rgens/parser/RBGrammarReader.java
@@ -0,0 +1,319 @@
+package bjc.rgens.parser;
+
+import com.mifmif.common.regex.Generex;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Random;
+import java.util.function.BiFunction;
+import java.util.function.Predicate;
+
+import bjc.utils.funcdata.FunctionalStringTokenizer;
+import bjc.utils.funcdata.FunctionalList;
+import bjc.utils.funcdata.FunctionalMap;
+import bjc.utils.funcdata.IList;
+import bjc.utils.funcdata.IMap;
+import bjc.utils.funcutils.ListUtils;
+import bjc.utils.gen.WeightedGrammar;
+import bjc.utils.parserutils.RuleBasedConfigReader;
+
+/**
+ * Read a grammar from a stream
+ *
+ * @author ben
+ *
+ */
+public class RBGrammarReader {
+ private static RuleBasedConfigReader<ReaderState> reader;
+
+ private static Random numgen = new Random();
+
+ static {
+ setupReader();
+
+ initPragmas();
+ }
+
+ private static void addSubgrammarPragmas() {
+ reader.addPragma("new-sub-grammar", (tokenizer, state) -> {
+ state.startNewSubgrammar();
+ });
+
+ reader.addPragma("load-sub-grammar", RBGrammarReader::loadSubGrammar);
+ reader.addPragma("save-sub-grammar", RBGrammarReader::saveGrammar);
+ reader.addPragma("edit-sub-grammar", RBGrammarReader::editSubGrammar);
+ reader.addPragma("remove-sub-grammar", RBGrammarReader::removeSubGrammar);
+
+ reader.addPragma("edit-parent", (tokenizer, state) -> {
+ state.editParent();
+ });
+
+ reader.addPragma("promote", RBGrammarReader::promoteGrammar);
+ reader.addPragma("subordinate", RBGrammarReader::subordinateGrammar);
+
+ }
+
+ private static void debugGrammar(ReaderState state) {
+ System.out.println("Printing rule names: ");
+
+ for (String currentRule : state.getRuleNames().toIterable()) {
+ System.out.println("\t" + currentRule);
+ }
+
+ System.out.println();
+ }
+
+ private static void doCase(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ int ruleProbability = readOptionalProbability(tokenizer, state);
+
+ state.addCase(ruleProbability, tokenizer.toList());
+ }
+
+ private static void editSubGrammar(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String subgrammarName = tokenizer.nextToken();
+
+ state.editSubgrammar(subgrammarName);
+ }
+
+ /**
+ * Read a grammar from a path
+ *
+ * @param inputPath
+ * The path to load the grammar from
+ *
+ * @return A grammar read from the stream
+ *
+ * @throws IOException
+ * If something goes wrong during file reading
+ *
+ */
+ public static WeightedGrammar<String> fromPath(Path inputPath) throws IOException {
+ ReaderState initialState = new ReaderState(inputPath);
+
+ try (FileInputStream inputStream = new FileInputStream(inputPath.toFile())) {
+ WeightedGrammar<String> gram = reader.fromStream(inputStream, initialState).getGrammar();
+
+ IMap<String, IList<String>> vars = new FunctionalMap<>();
+
+ Predicate<String> specialPredicate = (strang) -> {
+ if(strang.matches("\\{\\S+\\}") || strang.matches("\\[\\S+\\}")) {
+ return true;
+ }
+
+ return false;
+ };
+
+ BiFunction<String, WeightedGrammar<String>, IList<String>>
+ specialAction = (strang, gramm) -> {
+ IList<String> retList = new FunctionalList<>();
+
+ if(strang.matches("\\{\\S+\\}")) {
+ if(strang.matches("\\{\\S+:=\\S+\\}")) {
+ String[] varParts = strang.split(":=");
+
+ String varName = varParts[0].substring(1);
+ String ruleName = varParts[1].substring(0, varParts[1].length());
+
+ IList<String> varValue = gramm.generateGenericValues(
+ ruleName, (s) -> s, " ");
+
+ vars.put(varName, varValue);
+ } else if(strang.matches("\\{\\S+=\\S+\\}")) {
+ String[] varParts = strang.split("=");
+
+ String varName = varParts[0].substring(1);
+ String varValue = varParts[1].substring(0, varParts[1].length());
+
+ vars.put(varName, new FunctionalList<>(varValue));
+ } else {
+ // @FIXME notify the user they did something wrong
+ retList.add(strang);
+ }
+ } else {
+ if(strang.matches("\\[\\$\\S+\\]")) {
+ String varName = strang.substring(2, strang.length());
+
+ retList = vars.get(varName);
+ } else if(strang.matches("\\[\\$\\S+\\-\\S+\\]")) {
+ String[] varParts = strang.substring(1, strang.length()).split("-");
+
+ StringBuilder actualName = new StringBuilder("[");
+
+ for(String varPart : varParts) {
+ if(varPart.startsWith("$")) {
+ IList<String> varName = vars.get(varPart.substring(1));
+
+ if(varName.getSize() != 1) {
+ // @FIXME notify the user they did something wrong
+ }
+
+ actualName.append(varName.first() + "-");
+ } else {
+ actualName.append(varPart + "-");
+ }
+ }
+
+ // Trim trailing -
+ actualName.deleteCharAt(actualName.length() - 1);
+ actualName.append("]");
+
+ retList = gramm.generateGenericValues(actualName.toString(), (s) -> s, " ");
+ } else {
+ // @FIXME notify the user they did something wrong
+ retList.add(strang);
+ }
+ }
+
+ return retList;
+ };
+
+ gram.configureSpecial(specialPredicate, specialAction);
+
+ return gram;
+ } catch (IOException ioex) {
+ throw ioex;
+ }
+ }
+
+ private static void importRule(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String ruleName = tokenizer.nextToken();
+ String subgrammarName = tokenizer.nextToken();
+
+ state.addGrammarAlias(subgrammarName, ruleName);
+ }
+
+ private static void initialRule(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String initialRuleName = tokenizer.nextToken();
+
+ state.setInitialRule(initialRuleName);
+ }
+
+ private static void initPragmas() {
+ addSubgrammarPragmas();
+
+ reader.addPragma("debug", (tokenizer, state) -> {
+ debugGrammar(state);
+ });
+
+ reader.addPragma("uniform", (tokenizer, state) -> {
+ state.toggleUniformity();
+ });
+
+ reader.addPragma("initial-rule", RBGrammarReader::initialRule);
+
+ reader.addPragma("import-rule", RBGrammarReader::importRule);
+
+ reader.addPragma("remove-rule", RBGrammarReader::removeRule);
+
+ reader.addPragma("prefix-with", RBGrammarReader::prefixRule);
+ reader.addPragma("suffix-with", RBGrammarReader::suffixRule);
+
+ reader.addPragma("regex-rule", (tokenizer, state) -> {
+ String ruleName = tokenizer.nextToken();
+
+ IList<String> regx = tokenizer.toList();
+ Generex regex = new Generex(ListUtils.collapseTokens(regx));
+
+ state.addSpecialRule(ruleName, () -> {
+ return new FunctionalList<>(regex.random().split(" "));
+ });
+ });
+
+ reader.addPragma("range-rule", (tokenizer, state) -> {
+ String ruleName = tokenizer.nextToken();
+
+ int start = Integer.parseInt(tokenizer.nextToken());
+ int end = Integer.parseInt(tokenizer.nextToken());
+
+ state.addSpecialRule(ruleName, () -> {
+ return new FunctionalList<>(Integer.toString(
+ numgen.nextInt((end - start) + 1) + start));
+ });
+ });
+ }
+
+ private static void loadSubGrammar(FunctionalStringTokenizer stk, ReaderState rs) {
+ String subgrammarName = stk.nextToken();
+ String subgrammarPath = stk.nextToken();
+
+ rs.loadSubgrammar(subgrammarName, subgrammarPath);
+ }
+
+ private static void prefixRule(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String ruleName = tokenizer.nextToken();
+ String prefixToken = tokenizer.nextToken();
+
+ int additionalProbability = readOptionalProbability(tokenizer, state);
+
+ state.prefixRule(ruleName, prefixToken, additionalProbability);
+ }
+
+ private static void promoteGrammar(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String subgrammarName = tokenizer.nextToken();
+ String subordinateName = tokenizer.nextToken();
+
+ state.promoteGrammar(subgrammarName, subordinateName);
+ }
+
+ private static int readOptionalProbability(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ if (state.isUniform()) {
+ return 1;
+ }
+
+ return Integer.parseInt(tokenizer.nextToken());
+ }
+
+ private static void removeRule(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String ruleName = tokenizer.nextToken();
+
+ state.deleteRule(ruleName);
+ }
+
+ private static void removeSubGrammar(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String subgrammarName = tokenizer.nextToken();
+
+ state.deleteSubgrammar(subgrammarName);
+ }
+
+ private static void saveGrammar(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String subgrammarName = tokenizer.nextToken();
+
+ state.saveSubgrammar(subgrammarName);
+ }
+
+ private static void setupReader() {
+ reader = new RuleBasedConfigReader<>(null, null, null);
+
+ reader.setStartRule((tokenizer, stateTokenPair) -> {
+ stateTokenPair.doWith((initToken, state) -> {
+ state.startNewRule(initToken);
+
+ doCase(tokenizer, state);
+ });
+ });
+
+ reader.setContinueRule((tokenizer, state) -> {
+ doCase(tokenizer, state);
+ });
+
+ reader.setEndRule((tokenizer) -> {
+ tokenizer.setCurrentRule(null);
+ });
+ }
+
+ private static void subordinateGrammar(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String grammarName = tokenizer.nextToken();
+
+ state.subordinateGrammar(grammarName);
+ }
+
+ private static void suffixRule(FunctionalStringTokenizer tokenizer, ReaderState state) {
+ String ruleName = tokenizer.nextToken();
+ String suffixToken = tokenizer.nextToken();
+
+ int additionalProbability = readOptionalProbability(tokenizer, state);
+
+ state.suffixRule(ruleName, suffixToken, additionalProbability);
+ }
+}
diff --git a/RGens/src/main/java/bjc/rgens/parser/ReaderState.java b/RGens/src/main/java/bjc/rgens/parser/ReaderState.java
new file mode 100644
index 0000000..89fde25
--- /dev/null
+++ b/RGens/src/main/java/bjc/rgens/parser/ReaderState.java
@@ -0,0 +1,306 @@
+package bjc.rgens.parser;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Stack;
+import java.util.function.Supplier;
+
+import bjc.utils.funcdata.IList;
+import bjc.utils.gen.WeightedGrammar;
+
+/**
+ * Represents the internal state of reader
+ *
+ * @author ben
+ *
+ */
+public class ReaderState {
+ private WeightedGrammar<String> currentGrammar;
+
+ private Stack<WeightedGrammar<String>> grammarStack;
+
+ private String currentRule;
+
+ private boolean isUniform;
+
+ private Path currentDirectory;
+
+ /**
+ * Create a new reader state
+ *
+ * @param inputPath
+ * The path to this grammar
+ */
+ public ReaderState(Path inputPath) {
+ grammarStack = new Stack<>();
+
+ currentGrammar = new WeightedGrammar<>();
+
+ // Grammars start out uniform
+ isUniform = true;
+
+ currentDirectory = inputPath.getParent();
+ }
+
+ /**
+ * Get the rule names for the current grammar
+ *
+ * @return The rule names for the current grammar
+ */
+ public IList<String> getRuleNames() {
+ return currentGrammar.getRuleNames();
+ }
+
+ /**
+ * Check if this reader is currently in uniform mode
+ *
+ * @return Whether this grammar is in uniform mode
+ */
+ public boolean isUniform() {
+ return isUniform;
+ }
+
+ /**
+ * Set the current grammar to be the specified one
+ *
+ * @param newWorkingGrammar
+ * The new grammar to use
+ */
+ public void setCurrentGrammar(WeightedGrammar<String> newWorkingGrammar) {
+ currentGrammar = newWorkingGrammar;
+ }
+
+ /**
+ * Set the rule currently being worked on
+ *
+ * @param ruleName
+ * The rule currently being worked on
+ */
+ public void setCurrentRule(String ruleName) {
+ currentRule = ruleName;
+ }
+
+ /**
+ * Set the initial rule of this grammar
+ *
+ * @param ruleName
+ * The initial rule of this grammar
+ */
+ public void setInitialRule(String ruleName) {
+ currentGrammar.setInitialRule(ruleName);
+ }
+
+ /**
+ * Toggle this uniformity setting for this grammar
+ */
+ public void toggleUniformity() {
+ isUniform = !isUniform;
+ }
+
+ /**
+ * Start work on a new sub-grammar of the previous grammar
+ */
+ public void startNewSubgrammar() {
+ grammarStack.push(currentGrammar);
+
+ currentGrammar = new WeightedGrammar<>();
+ }
+
+ /**
+ * Move to editing the grammar that is the parent of this current one
+ */
+ public void editParent() {
+ currentGrammar = grammarStack.pop();
+ }
+
+ /**
+ * Add a case to the current grammar
+ *
+ * @param ruleProbability
+ * The probability for this case to occur
+ * @param ruleParts
+ * The parts that make up this case
+ */
+ public void addCase(int ruleProbability, IList<String> ruleParts) {
+ currentGrammar.addCase(currentRule, ruleProbability, ruleParts);
+ }
+
+ public void addSpecialRule(String ruleName, Supplier<IList<String>> cse) {
+ currentGrammar.addSpecialRule(ruleName, cse);
+ }
+ /**
+ * Edit a subgrammar of the current grammar
+ *
+ * @param subgrammarName
+ * The name of the subgrammar to edit
+ */
+ public void editSubgrammar(String subgrammarName) {
+ WeightedGrammar<String> subgrammar = currentGrammar.getSubgrammar(subgrammarName);
+
+ grammarStack.push(currentGrammar);
+
+ currentGrammar = subgrammar;
+ }
+
+ /**
+ * Start editing a new rule in the current grammar
+ *
+ * @param ruleName
+ * The name of the new rule to edit
+ */
+ public void startNewRule(String ruleName) {
+ currentGrammar.addRule(ruleName);
+
+ currentRule = ruleName;
+ }
+
+ /**
+ * Convert this package of state into a weighted grammar
+ *
+ * @return The grammar represented by this state
+ */
+ public WeightedGrammar<String> getGrammar() {
+ return currentGrammar;
+ }
+
+ /**
+ * Alias a current subgrammar to a new name
+ *
+ * @param subgrammarName
+ * The name of the subgrammar to alias
+ * @param subgrammarAlias
+ * The name of the alias for the subgrammar
+ */
+ public void addGrammarAlias(String subgrammarName, String subgrammarAlias) {
+ currentGrammar.addGrammarAlias(subgrammarName, subgrammarAlias);
+ }
+
+ /**
+ * Load a subgrammar into this one.
+ *
+ * @param subgrammarName
+ * The name to assign to the subgrammar
+ * @param subgrammarPath
+ * The path to load the subgrammar from
+ */
+ public void loadSubgrammar(String subgrammarName,
+ String subgrammarPath) {
+ Path loadPath = currentDirectory.resolve(subgrammarPath);
+
+ try {
+ WeightedGrammar<String> subgrammar = RBGrammarReader
+ .fromPath(loadPath);
+
+ currentGrammar.addSubgrammar(subgrammarName, subgrammar);
+ } catch (IOException ioex) {
+ PragmaErrorException peex = new PragmaErrorException(
+ "Couldn't load subgrammar " + subgrammarName + " from "
+ + subgrammarPath);
+
+ peex.initCause(ioex);
+
+ throw peex;
+ }
+ }
+
+ /**
+ * Prefix a token onto all of the cases for the specified rule
+ *
+ * @param ruleName
+ * The rule to do prefixing on
+ * @param prefixToken
+ * The token to prefix onto each case
+ * @param additionalProbability
+ * The probability modification of the prefixed cases
+ */
+ public void prefixRule(String ruleName, String prefixToken,
+ int additionalProbability) {
+ currentGrammar.prefixRule(ruleName, prefixToken,
+ additionalProbability);
+ }
+
+ /**
+ * Promote the specified subgrammar above the current grammar
+ *
+ * @param subgrammarName
+ * The name of the subgrammar to promote
+ * @param subordinateName
+ * The name to bind this grammar to the subgrammar under
+ */
+ public void promoteGrammar(String subgrammarName,
+ String subordinateName) {
+ WeightedGrammar<String> subgrammar = currentGrammar
+ .getSubgrammar(subgrammarName);
+
+ currentGrammar.deleteSubgrammar(subgrammarName);
+
+ subgrammar.addSubgrammar(subordinateName, currentGrammar);
+
+ currentGrammar = subgrammar;
+ }
+
+ /**
+ * Delete a rule from the current subgrammar
+ *
+ * @param ruleName
+ * The name of the rule to delete
+ */
+ public void deleteRule(String ruleName) {
+ currentGrammar.deleteRule(ruleName);
+ }
+
+ /**
+ * Delete a subgrammar from the current grammar
+ *
+ * @param subgrammarName
+ * The name of the subgrammar to delete
+ */
+ public void deleteSubgrammar(String subgrammarName) {
+ currentGrammar.deleteSubgrammar(subgrammarName);
+ }
+
+ /**
+ * Save the current grammar as a subgrammar of the previous one
+ *
+ * @param subgrammarName
+ * The name of the subgrammar to save this under
+ */
+ public void saveSubgrammar(String subgrammarName) {
+ WeightedGrammar<String> newWorkingGrammar = grammarStack.pop();
+
+ newWorkingGrammar.addSubgrammar(subgrammarName, currentGrammar);
+
+ currentGrammar = newWorkingGrammar;
+ }
+
+ /**
+ * Subordinate this grammar as a subgrammar to a new grammar
+ *
+ * @param grammarName
+ * The name for the subgrammar to bind the current grammar
+ * to
+ */
+ public void subordinateGrammar(String grammarName) {
+ WeightedGrammar<String> newWorkingGrammar = new WeightedGrammar<>();
+
+ newWorkingGrammar.addSubgrammar(grammarName, currentGrammar);
+
+ currentGrammar = newWorkingGrammar;
+ }
+
+ /**
+ * Suffix a token onto all of the cases for the specified rule
+ *
+ * @param ruleName
+ * The rule to do suffixing on
+ * @param suffixToken
+ * The token to suffix onto each case
+ * @param additionalProbability
+ * The probability modification of the suffixed cases
+ */
+ public void suffixRule(String ruleName, String suffixToken,
+ int additionalProbability) {
+ currentGrammar.suffixRule(ruleName, suffixToken,
+ additionalProbability);
+ }
+}