changeset 482:f58fe38c8c04 bgo-commands-codegen

Ongoing work on codegen: ts and cpp enum and struct writing seem to be OK. No file write yet
author bgo-osimis
date Thu, 14 Feb 2019 20:58:42 +0100
parents 38997ceb9bc6
children 772516adcbf6
files Resources/CodeGeneration/stonegentool.py Resources/CodeGeneration/stonegentool_test.py
diffstat 2 files changed, 318 insertions(+), 227 deletions(-) [+]
line wrap: on
line diff
--- a/Resources/CodeGeneration/stonegentool.py	Wed Feb 13 20:42:26 2019 +0100
+++ b/Resources/CodeGeneration/stonegentool.py	Thu Feb 14 20:58:42 2019 +0100
@@ -2,7 +2,8 @@
 import re
 import sys
 from typing import Dict, List, Set
-
+from io import StringIO
+import time
 
 """
          1         2         3         4         5         6         7
@@ -12,36 +13,42 @@
 import json
 import re
 
-"""A set of utilities to perform JSON operation"""
 
 class JsonHelpers:
+    """A set of utilities to perform JSON operations"""
+
     @staticmethod
     def removeCommentsFromJsonContent(string):
         """
-        remove comments from a JSON file
+      Remove comments from a JSON file
 
-        Comments are not allowed in JSON but, i.e., Orthanc configuration files contains C++ like comments that we need to remove before python can parse the file
-        """
-        string = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "",
-                        string)  # remove all occurance streamed comments (/*COMMENT */) from string
-        string = re.sub(re.compile("//.*?\n"), "",
-                        string)  # remove all occurance singleline comments (//COMMENT\n ) from string
+      Comments are not allowed in JSON but, i.e., Orthanc configuration files
+      contains C++ like comments that we need to remove before python can
+      parse the file
+      """
+        # remove all occurrence streamed comments (/*COMMENT */) from string
+        string = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "", string)
+
+        # remove all occurrence singleline comments (//COMMENT\n ) from string
+        string = re.sub(re.compile("//.*?\n"), "", string)
+
         return string
 
     @staticmethod
     def loadJsonWithComments(path):
         """
-        reads a JSON file that may contain C++ like comments
-        """
-        with open(path, 'r') as fp:
+      Reads a JSON file that may contain C++ like comments
+      """
+        with open(path, "r") as fp:
             fileContent = fp.read()
         fileContent = JsonHelpers.removeCommentsFromJsonContent(fileContent)
         return json.loads(fileContent)
 
 
-def LoadSchema(filePath : str):
+def LoadSchema(filePath: str):
     return JsonHelpers.loadJsonWithComments(filePath)
 
+
 # class Type:
 #   def __init__(self, canonicalTypeName:str, kind:str):
 #     allowedTypeKinds = ["primitive","enum","struct","collection"]
@@ -58,75 +65,85 @@
 #   def getDependentTypes(self) -> List[Type]:
 #     return self.dependentTypes
 
-def GetCppTypeNameFromCanonical(canonicalTypeName : str) -> str:
-    # C++: prefix map vector and string with std::map, std::vector and 
+
+def GetCppTypeNameFromCanonical(canonicalTypeName: str) -> str:
+    # C++: prefix map vector and string with std::map, std::vector and
     # std::string
     # replace int32 by int32_t
     # replace float32 by float
     # replace float64 by double
-    retVal : str = canonicalTypeName.replace("map","std::map")
-    retVal : str = canonicalTypeName.replace("vector","std::vector")
-    retVal : str = canonicalTypeName.replace("int32","int32_t")
-    retVal : str = canonicalTypeName.replace("float32","float")
-    retVal : str = canonicalTypeName.replace("float64","double")
+    retVal = canonicalTypeName
+    retVal: str = retVal.replace("map", "std::map")
+    retVal: str = retVal.replace("vector", "std::vector")
+    retVal: str = retVal.replace("int32", "int32_t")
+    retVal: str = retVal.replace("float32", "float")
+    retVal: str = retVal.replace("float64", "double")
     return retVal
 
-def GetTypeScriptTypeNameFromCanonical(canonicalTypeName : str) -> str:
+def GetTypeScriptTypeNameFromCanonical(canonicalTypeName: str) -> str:
     # TS: replace vector with Array and map with Map
     # string remains string
     # replace int32 by number
     # replace float32 by number
     # replace float64 by number
-    retVal : str = canonicalTypeName.replace("map","Map")
-    retVal : str = canonicalTypeName.replace("vector","Array")
-    retVal : str = canonicalTypeName.replace("int32","number")
-    retVal : str = canonicalTypeName.replace("float32","number")
-    retVal : str = canonicalTypeName.replace("float64","number")
-    retVal : str = canonicalTypeName.replace("bool","boolean")
+    retVal = canonicalTypeName
+    retVal: str = retVal.replace("map", "Map")
+    retVal: str = retVal.replace("vector", "Array")
+    retVal: str = retVal.replace("int32", "number")
+    retVal: str = retVal.replace("float32", "number")
+    retVal: str = retVal.replace("float64", "number")
+    retVal: str = retVal.replace("bool", "boolean")
     return retVal
 
+
 # class Schema:
 #   def __init__(self, root_prefix : str, defined_types : List[Type]):
 #     self.rootName : str = root_prefix
 #     self.definedTypes : str = defined_types
 
-def CheckTypeSchema(definedType : Dict) -> None:
-  allowedDefinedTypeKinds = ["enum","struct"]
-  if not 'name' in definedType:
-    raise Exception("type lacks the 'name' key")
-  name = definedType['name']
-  if not 'kind' in definedType:
-    raise Exception(f"type {name} lacks the 'kind' key")
-  kind = definedType['kind']
-  if not (kind in allowedDefinedTypeKinds):
-    raise Exception(f"type {name} : kind {kind} is not allowed. " + 
-      f"It must be one of {allowedDefinedTypeKinds}")
-  
-  if not 'fields' in definedType:
-    raise Exception("type {name} lacks the 'fields' key")
+
+def CheckTypeSchema(definedType: Dict) -> None:
+    allowedDefinedTypeKinds = ["enum", "struct"]
+    if not "name" in definedType:
+        raise Exception("type lacks the 'name' key")
+    name = definedType["name"]
+    if not "kind" in definedType:
+        raise Exception(f"type {name} lacks the 'kind' key")
+    kind = definedType["kind"]
+    if not (kind in allowedDefinedTypeKinds):
+        raise Exception(
+            f"type {name} : kind {kind} is not allowed. "
+            + f"It must be one of {allowedDefinedTypeKinds}"
+        )
+
+    if not "fields" in definedType:
+        raise Exception(f"type {name} lacks the 'fields' key")
 
-  # generic check on all kinds of types
-  fields = definedType['fields']
-  for field in fields:
-    fieldName = field['name']
-    if not 'name' in field:
-      raise Exception("field in type {name} lacks the 'name' key")
-
-  # fields in struct must have types
-  if kind == 'struct':  
+    # generic check on all kinds of types
+    fields = definedType["fields"]
     for field in fields:
-      fieldName = field['name']
-      if not 'type' in field:
-        raise Exception(f"field {fieldName} in type {name} "
-        + "lacks the 'type' key")
+        if not "name" in field:
+            raise Exception(f"field in type {name} lacks the 'name' key")
+        fieldName = field["name"]
 
-def CheckSchemaSchema(schema : Dict) -> None:
-  if not 'root_name' in schema:
-    raise Exception("schema lacks the 'root_name' key")
-  if not 'types' in schema:
-    raise Exception("schema lacks the 'types' key")
-  for definedType in schema['types']:
-    CheckTypeSchema(definedType)
+    # fields in struct must have types
+    if kind == "struct":
+        for field in fields:
+            fieldName = field["name"]
+            if not "type" in field:
+                raise Exception(
+                    f"field {fieldName} in type {name} " + "lacks the 'type' key"
+                )
+
+
+def CheckSchemaSchema(schema: Dict) -> None:
+    if not "root_name" in schema:
+        raise Exception("schema lacks the 'root_name' key")
+    if not "types" in schema:
+        raise Exception("schema lacks the 'types' key")
+    for definedType in schema["types"]:
+        CheckTypeSchema(definedType)
+
 
 # def CreateAndCacheTypeObject(allTypes : Dict[str,Type], typeDict : Dict)  -> None:
 #   """This does not set the dependentTypes field"""
@@ -137,28 +154,32 @@
 #     typeObject = Type(typeName, typeDict['kind'])
 #     allTypes[typeName] = typeObject
 
-def EatToken(sentence : str) -> (str,str):
-  """splits "A,B,C" into "A" and "B,C" where A, B and C are type names
+
+def EatToken(sentence: str) -> (str, str):
+    """splits "A,B,C" into "A" and "B,C" where A, B and C are type names
   (including templates) like "int32", "TotoTutu", or 
   "map<map<int32,vector<string>>,map<string,int32>>" """
 
-  if sentence.count('<') != sentence.count('>'):
-    raise Exception(f"Error in the partial template type list {sentence}."
-      + " The number of < and > do not match!")
+    if sentence.count("<") != sentence.count(">"):
+        raise Exception(
+            f"Error in the partial template type list {sentence}."
+            + " The number of < and > do not match!"
+        )
 
-  # the template level we're currently in
-  templateLevel = 0
-  for i in range(len(sentence)):
-    if (sentence[i] == ",") and (templateLevel == 0):
-      return (sentence[0:i],sentence[i+1:])
-    elif (sentence[i] == "<"):
-      templateLevel += 1
-    elif (sentence[i] == ">"):
-      templateLevel -= 1
-  return (sentence,"")
+    # the template level we're currently in
+    templateLevel = 0
+    for i in range(len(sentence)):
+        if (sentence[i] == ",") and (templateLevel == 0):
+            return (sentence[0:i], sentence[i + 1 :])
+        elif sentence[i] == "<":
+            templateLevel += 1
+        elif sentence[i] == ">":
+            templateLevel -= 1
+    return (sentence, "")
 
-def SplitListOfTypes(typeName : str) -> List[str]:
-  """Splits something like
+
+def SplitListOfTypes(typeName: str) -> List[str]:
+    """Splits something like
   vector<string>,int32,map<string,map<string,int32>> 
   in:
   - vector<string>
@@ -167,36 +188,38 @@
   
   This is not possible with a regex so 
   """
-  stillStuffToEat : bool = True
-  tokenList = []
-  restOfString = typeName
-  while stillStuffToEat:
-    firstToken,restOfString = EatToken(restOfString)
-    tokenList.append(firstToken)
-    if restOfString == "":
-      stillStuffToEat = False
-  return tokenList
+    stillStuffToEat: bool = True
+    tokenList = []
+    restOfString = typeName
+    while stillStuffToEat:
+        firstToken, restOfString = EatToken(restOfString)
+        tokenList.append(firstToken)
+        if restOfString == "":
+            stillStuffToEat = False
+    return tokenList
 
-templateRegex = \
-  re.compile(r"([a-zA-Z0-9_]*[a-zA-Z0-9_]*)<([a-zA-Z0-9_,:<>]+)>")
+
+templateRegex = re.compile(r"([a-zA-Z0-9_]*[a-zA-Z0-9_]*)<([a-zA-Z0-9_,:<>]+)>")
 
-def ParseTemplateType(typeName) -> (bool,str,List[str]):
-  """ If the type is a template like "SOMETHING<SOME<THING,EL<SE>>>", then 
+
+def ParseTemplateType(typeName) -> (bool, str, List[str]):
+    """ If the type is a template like "SOMETHING<SOME<THING,EL<SE>>>", then 
   it returns (true,"SOMETHING","SOME<THING,EL<SE>>")
   otherwise it returns (false,"","")"""
-  
-  # let's remove all whitespace from the type
-  # split without argument uses any whitespace string as separator
-  # (space, tab, newline, return or formfeed)
-  typeName = "".join(typeName.split())
-  matches = templateRegex.match(typeName)
-  if matches == None:
-    return (False,"","")
-  else:
-    # we need to split with the commas that are outside of the defined types
-    # simply splitting at commas won't work
-    listOfDependentTypes = SplitListOfTypes(matches.group(2))
-    return (True,matches.group(1),listOfDependentTypes)
+
+    # let's remove all whitespace from the type
+    # split without argument uses any whitespace string as separator
+    # (space, tab, newline, return or formfeed)
+    typeName = "".join(typeName.split())
+    matches = templateRegex.match(typeName)
+    if matches == None:
+        return (False, "", "")
+    else:
+        # we need to split with the commas that are outside of the defined types
+        # simply splitting at commas won't work
+        listOfDependentTypes = SplitListOfTypes(matches.group(2))
+        return (True, matches.group(1), listOfDependentTypes)
+
 
 # def GetPrimitiveType(typeName : str) -> Type:
 #   if typeName in allTypes:
@@ -211,161 +234,222 @@
 #     allTypes[typeName] = typeObject
 #     return typeObject
 
+
 def ProcessTypeTree(
-     ancestors : List[str]
-   , genOrderQueue : List[str]
-   , structTypes : Dict[str,Dict], typeName : str) -> None:
-  if typeName in ancestors:
-    raise Exception(f"Cyclic dependency chain found: the last of {ancestors} "
-      + f"depends on {typeName} that is already in the list.")
-  
-  if not (typeName in genOrderQueue):
-    # if we reach this point, it means the type is NOT a struct or an enum.
-    # it is another (non directly user-defined) type that we must parse and 
-    # create. Let's do it!
-    (isTemplate,_,dependentTypeNames) = ParseTemplateType(typeName)
-    if isTemplate:
-      for dependentTypeName in dependentTypeNames:
-        # childAncestors = ancestors.copy()  NO TEMPLATE ANCESTOR!!!
-        # childAncestors.append(typeName)
-        ProcessTypeTree(ancestors, genOrderQueue,
-          structTypes, dependentTypeName)
-    else:
-      if typeName in structTypes:
-        ProcessStructType_DepthFirstRecursive(genOrderQueue, structTypes,
-        structTypes[typeName])
+    ancestors: List[str],
+    genOrderQueue: List[str],
+    structTypes: Dict[str, Dict],
+    typeName: str,
+) -> None:
+    if typeName in ancestors:
+        raise Exception(
+            f"Cyclic dependency chain found: the last of {ancestors} "
+            + f"depends on {typeName} that is already in the list."
+        )
+
+    if not (typeName in genOrderQueue):
+        # if we reach this point, it means the type is NOT a struct or an enum.
+        # it is another (non directly user-defined) type that we must parse and
+        # create. Let's do it!
+        (isTemplate, _, dependentTypeNames) = ParseTemplateType(typeName)
+        if isTemplate:
+            for dependentTypeName in dependentTypeNames:
+                # childAncestors = ancestors.copy()  NO TEMPLATE ANCESTOR!!!
+                # childAncestors.append(typeName)
+                ProcessTypeTree(
+                    ancestors, genOrderQueue, structTypes, dependentTypeName
+                )
+        else:
+            if typeName in structTypes:
+                ProcessStructType_DepthFirstRecursive(
+                    genOrderQueue, structTypes, structTypes[typeName]
+                )
+
 
 def ProcessStructType_DepthFirstRecursive(
-    genOrderQueue : List[str], structTypes : Dict[str,Dict]
-  , typeDict : Dict) -> None:
-    # let's generate the code according to the 
-    typeName : str = typeDict['name']
-    if typeDict['kind'] != 'struct':
-      raise Exception(f"Unexpected kind '{typeDict['kind']}' for " +
-      "type '{typeName}'")
-    typeFields : List[Dict] = typeDict['fields']
+    genOrderQueue: List[str], structTypes: Dict[str, Dict], typeDict: Dict
+) -> None:
+    # let's generate the code according to the
+    typeName: str = typeDict["name"]
+    if typeDict["kind"] != "struct":
+        raise Exception(
+            f"Unexpected kind '{typeDict['kind']}' for " + f"type '{typeName}'"
+        )
+    typeFields: List[Dict] = typeDict["fields"]
     for typeField in typeFields:
-      ancestors = [typeName]
-      ProcessTypeTree(ancestors, genOrderQueue
-        , structTypes, typeField['type'])
+        ancestors = [typeName]
+        ProcessTypeTree(ancestors, genOrderQueue, structTypes, typeField["type"])
     # now we're pretty sure our dependencies have been processed,
-    # we can start marking our code for generation
-    genOrderQueue.add(typeName)
-
-def ProcessEnumerationType(definedType) -> (str,str):
-  print(f"About to process enumeration: {definedType['name']}")
-  tsText : str = """import blah
+    # we can start marking our code for generation (it might already have
+    # been done if someone referenced us earlier)
+    if not typeName in genOrderQueue:
+      genOrderQueue.append(typeName)
 
-  enum PROUT
-  {
-    value1
-    value2
-  }"""
+def ProcessEnumerationType(
+  outputStreams: Dict[str, StringIO], typeDict) -> None:
+  print(f"About to process enumeration: {typeDict['name']}")
+  tsText : StringIO = StringIO()
+  cppText : StringIO = StringIO()
+  
+  tsText.write("enum %s\n" % typeDict['name'])
+  tsText.write("{\n")
 
-  cppText : str = """import blah
+  cppText.write("enum %s\n" % typeDict['name'])
+  cppText.write("{\n")
+
+  for i in range(len(typeDict['fields'])):
+    field = typeDict['fields'][i]
+    name = field['name']
+
+    tsText.write("    %s" % name)
+    if i < len(typeDict['fields'])-1:
+      tsText.write(",")
+    tsText.write("\n")
 
-  enum PROUT
-  {
-    value1
-    value2
-  }"""
+    cppText.write("    %s" % name)
+    if i < len(typeDict['fields'])-1:
+      cppText.write(",")
+    cppText.write("\n")
+  
+  tsText.write("};\n\n")
+  cppText.write("};\n\n")
 
-  return (tsText,cppText)
+  outputStreams['ts'].write(tsText.getvalue())
+  outputStreams['cpp'].write(cppText.getvalue())
+
 
-def ProcessStructType(typeDict) -> (str,str):
-  print(f"About to process enumeration: {definedType['name']}")
-  tsText : str = """import blah
+def ProcessStructType(
+  outputStreams: Dict[str, StringIO], typeDict) -> None:
+  print(f"About to process struct: {typeDict['name']}")
+  tsText : StringIO = StringIO()
+  cppText : StringIO = StringIO()
+  
+  tsText.write("class %s\n" % typeDict['name'])
+  tsText.write("{\n")
 
-  class PROUT
-  {
-    public value1 : Type1
-    public value2 : Type2
-  }"""
-
-  cppText : str = """import blah
+  cppText.write("struct %s\n" % typeDict['name'])
+  cppText.write("{\n")
 
-  class PROUT
-  {
-    public:
-    Type1: value1
-    Type2: value2
-  }"""
+  for i in range(len(typeDict['fields'])):
+    field = typeDict['fields'][i]
+    name = field['name']
+    tsType = GetTypeScriptTypeNameFromCanonical(field['type'])
+    tsText.write("    public %s %s;\n" % (tsType, name))
+    cppType = GetCppTypeNameFromCanonical(field['type'])
+    cppText.write("    %s %s;\n" % (cppType, name))
+  
+  tsText.write("};\n\n")
+  cppText.write("};\n\n")
 
-  return (tsText,cppText)
+  outputStreams['ts'].write(tsText.getvalue())
+  outputStreams['cpp'].write(cppText.getvalue())
 
-    
+def WritePreambles(outputStreams: Dict[str, StringIO]) -> None:
+    outputStreams["cpp"].write("""// autogenerated by stonegentool on %s
+#include <cstdint>
+#include <string>
+#include <vector>
+#include <map>
+""" % time.ctime())
 
-def ProcessSchema(schema : dict) -> None:
-  CheckSchemaSchema(schema)
-  rootName : str = schema['root_name']
-  definedTypes : list = schema['types']
+    outputStreams["ts"].write("""// autogenerated by stonegentool on %s
+""" % time.ctime())
+
+def ProcessSchema(schema: dict) -> (str, Dict[str, StringIO]):
+    CheckSchemaSchema(schema)
+    rootName: str = schema["root_name"]
+    definedTypes: list = schema["types"]
 
-  print(f"Processing schema. rootName = f{rootName}")
-  # this will be filled with the generation queue. That is, the type
-  # names in the order where they must be defined.
-  genOrderQueue : Set = set()
+    print(f"Processing schema. rootName = {rootName}")
+    # this will be filled with the generation queue. That is, the type
+    # names in the order where they must be defined.
+    genOrderQueue: List = []
 
-  # the struct names are mapped to their JSON dictionary
-  structTypes : Dict[str,Dict] = {}
+    # the struct names are mapped to their JSON dictionary
+    structTypes: Dict[str, Dict] = {}
 
-  # the order here is the generation order
-  for definedType in definedTypes:
-    if definedType['kind'] == 'enum':
-      ProcessEnumerationType(definedType)
+    outputStreams = {}
+    outputStreams["cpp"] = StringIO()
+    outputStreams["ts"] = StringIO()
+
+    WritePreambles(outputStreams)
+
+    # the order here is the generation order
+    for definedType in definedTypes:
+        if definedType["kind"] == "enum":
+            ProcessEnumerationType(outputStreams, definedType)
 
-  for definedType in definedTypes:
-    if definedType['kind'] == 'struct':
-      structTypes[definedType['name']] = definedType
+    for definedType in definedTypes:
+        if definedType["kind"] == "struct":
+            structTypes[definedType["name"]] = definedType
 
-  # the order here is NOT the generation order: the types
-  # will be processed according to their dependency graph
-  for definedType in definedTypes:
-    if definedType['kind'] == 'struct':
-      ProcessStructType_DepthFirstRecursive(genOrderQueue,structTypes,
-        definedType)
+    # the order here is NOT the generation order: the types
+    # will be processed according to their dependency graph
+    for definedType in definedTypes:
+        if definedType["kind"] == "struct":
+            ProcessStructType_DepthFirstRecursive(
+                genOrderQueue, structTypes, definedType
+            )
 
-  for i in range(len(genOrderQueue))
-    typeName = genOrderQueue[i]
-    typeDict = structTypes[typeName]
-    ProcessStructType(typeDict)
+    for i in range(len(genOrderQueue)):
+        typeName = genOrderQueue[i]
+        typeDict = structTypes[typeName]
+        ProcessStructType(outputStreams, typeDict)
+
+    return (rootName, outputStreams)
+
+
+if __name__ == "__main__":
+    import argparse
 
-if __name__ == '__main__':
-  import argparse
-  parser = argparse.ArgumentParser(
-    usage = """stonegentool.py [-h] [-o OUT_DIR] [-v] input_schemas
+    parser = argparse.ArgumentParser(
+        usage="""stonegentool.py [-h] [-o OUT_DIR] [-v] input_schemas
        EXAMPLE: python command_gen.py -o "generated_files/" """
-       + """ "mainSchema.json,App Specific Commands.json" """)
-  parser.add_argument("input_schema", type=str,
-                      help = "path to the schema file")
-  parser.add_argument("-o", "--out_dir", type=str, default=".", 
-                      help = """path of the directory where the files 
+        + """ "mainSchema.json,App Specific Commands.json" """
+    )
+    parser.add_argument("input_schema", type=str, help="path to the schema file")
+    parser.add_argument(
+        "-o",
+        "--out_dir",
+        type=str,
+        default=".",
+        help="""path of the directory where the files 
                                 will be generated. Default is current
-                                working folder""")
-  parser.add_argument("-v", "--verbosity", action="count", default=0,
-                      help = """increase output verbosity (0 == errors 
+                                working folder""",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbosity",
+        action="count",
+        default=0,
+        help="""increase output verbosity (0 == errors 
                                 only, 1 == some verbosity, 2 == nerd
-                                mode""")
+                                mode""",
+    )
 
-  args = parser.parse_args()
-  inputSchemaFilename = args.input_schema
-  outDir = args.out_dir
+    args = parser.parse_args()
+    inputSchemaFilename = args.input_schema
+    outDir = args.out_dir
 
-  print("input schema = " + str(inputSchemaFilename))
-  print("out dir = " + str(outDir))
+    print("input schema = " + str(inputSchemaFilename))
+    print("out dir = " + str(outDir))
 
-  ProcessSchema(LoadSchema(inputSchemaFilename))
-  
+    (rootName, outputStreams) = ProcessSchema(LoadSchema(inputSchemaFilename))
+    WriteStreamsToFiles(rootName, outputStreams)  # FIXME: WriteStreamsToFiles is not defined anywhere in this changeset -- this will raise NameError at runtime
 
 ###################
 ##     ATTIC     ##
 ###################
 
-# this works 
+# this works
 
 if False:
-  obj = json.loads("""{
+    obj = json.loads(
+        """{
     "firstName": "Alice",
     "lastName": "Hall",
     "age": 35
-  }""")
-  print(obj)
+  }"""
+    )
+    print(obj)
+
--- a/Resources/CodeGeneration/stonegentool_test.py	Wed Feb 13 20:42:26 2019 +0100
+++ b/Resources/CodeGeneration/stonegentool_test.py	Thu Feb 14 20:58:42 2019 +0100
@@ -89,11 +89,18 @@
   def test_GenOrderQueue(self):
     fn = os.path.join(os.path.dirname(__file__), 'test', 'test1.jsonc')
     obj = LoadSchema(fn)
-    genOrderQueue,structTypes = ProcessSchema(obj)
-    print(f"genOrderQueue = {genOrderQueue}")
-    print("")
+    genOrderQueue, outputStreams = ProcessSchema(obj)  # FIXME: ProcessSchema now returns (rootName, outputStreams); the first element is not the generation queue
+    self.assertEqual(3,len(genOrderQueue))
+    self.assertEqual("A",genOrderQueue[0])
+    self.assertEqual("B",genOrderQueue[1])
+    self.assertEqual("C",genOrderQueue[2])
+    #print(f"genOrderQueue = {genOrderQueue}")
+    #print("")
 
   def test_GenerateTypeScriptEnumeration(self):
+    fn = os.path.join(os.path.dirname(__file__), 'test', 'test1.jsonc')
+    obj = LoadSchema(fn)
+    (rootName,outputStreams) = ProcessSchema(obj)
     pass
 
   def test_GenerateCppEnumeration(self):