comparison Resources/CodeGeneration/stonegentool.py @ 482:f58fe38c8c04 bgo-commands-codegen

Ongoing work on codegen: ts and cpp enum and struct writing seem to be OK. No file write yet
author bgo-osimis
date Thu, 14 Feb 2019 20:58:42 +0100
parents 38997ceb9bc6
children 772516adcbf6
comparison
equal deleted inserted replaced
474:38997ceb9bc6 482:f58fe38c8c04
import json
import re
import sys
import time
from io import StringIO
from typing import Dict, List, Set, Tuple
6 7
7 """ 8 """
8 1 2 3 4 5 6 7 9 1 2 3 4 5 6 7
9 12345678901234567890123456789012345678901234567890123456789012345678901234567890 10 12345678901234567890123456789012345678901234567890123456789012345678901234567890
10 """ 11 """
11 12
12 import json 13 import json
13 import re 14 import re
14 15
15 """A set of utilities to perform JSON operation"""
16 16
class JsonHelpers:
    """A set of utilities to perform JSON operations"""

    @staticmethod
    def removeCommentsFromJsonContent(string):
        """
        Remove comments from a JSON file.

        Comments are not allowed in JSON but, i.e., Orthanc configuration
        files contain C++ like comments that we need to remove before Python
        can parse the file.
        """
        # raw strings: "/\*" in a plain literal relies on Python passing the
        # unknown escape through unchanged (DeprecationWarning since 3.6)
        # remove all occurrences of streamed comments (/* COMMENT */)
        string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", string)

        # remove all occurrences of single-line comments (// COMMENT\n)
        string = re.sub(re.compile(r"//.*?\n"), "", string)

        return string

    @staticmethod
    def loadJsonWithComments(path):
        """
        Reads a JSON file that may contain C++ like comments.
        """
        with open(path, "r") as fp:
            fileContent = fp.read()
        fileContent = JsonHelpers.removeCommentsFromJsonContent(fileContent)
        return json.loads(fileContent)
41 47
def LoadSchema(filePath: str):
    """Load a schema file: JSON that may contain C++-style comments."""
    return JsonHelpers.loadJsonWithComments(filePath)
44 51
45 # class Type: 52 # class Type:
46 # def __init__(self, canonicalTypeName:str, kind:str): 53 # def __init__(self, canonicalTypeName:str, kind:str):
47 # allowedTypeKinds = ["primitive","enum","struct","collection"] 54 # allowedTypeKinds = ["primitive","enum","struct","collection"]
48 # """dependent type is the list of canonical types this type depends on. 55 # """dependent type is the list of canonical types this type depends on.
56 # self.dependentTypes = dependentTypes 63 # self.dependentTypes = dependentTypes
57 64
58 # def getDependentTypes(self) -> List[Type]: 65 # def getDependentTypes(self) -> List[Type]:
59 # return self.dependentTypes 66 # return self.dependentTypes
60 67
def GetCppTypeNameFromCanonical(canonicalTypeName: str) -> str:
    """Convert a canonical type name to its C++ spelling.

    - map, vector and string are prefixed with std::
    - int32 becomes int32_t, float32 becomes float, float64 becomes double
    """
    # each replacement feeds the next one (an earlier revision reapplied
    # every replace to the untouched input, keeping only the last one)
    retVal = canonicalTypeName
    retVal = retVal.replace("map", "std::map")
    retVal = retVal.replace("vector", "std::vector")
    retVal = retVal.replace("int32", "int32_t")
    retVal = retVal.replace("float32", "float")
    retVal = retVal.replace("float64", "double")
    # the header comment promised std::string too, but the replacement was
    # missing: bare "string" does not compile in the generated C++ (the
    # preamble includes <string> without a using-declaration)
    retVal = retVal.replace("string", "std::string")
    return retVal
def GetTypeScriptTypeNameFromCanonical(canonicalTypeName: str) -> str:
    """Convert a canonical type name to its TypeScript spelling.

    vector -> Array, map -> Map, every numeric type -> number,
    bool -> boolean; string is left untouched.
    """
    substitutions = (
        ("map", "Map"),
        ("vector", "Array"),
        ("int32", "number"),
        ("float32", "number"),
        ("float64", "number"),
        ("bool", "boolean"),
    )
    retVal = canonicalTypeName
    for canonical, typescript in substitutions:
        retVal = retVal.replace(canonical, typescript)
    return retVal
87 98
88 # class Schema: 99 # class Schema:
89 # def __init__(self, root_prefix : str, defined_types : List[Type]): 100 # def __init__(self, root_prefix : str, defined_types : List[Type]):
90 # self.rootName : str = root_prefix 101 # self.rootName : str = root_prefix
91 # self.definedTypes : str = defined_types 102 # self.definedTypes : str = defined_types
92 103
def CheckTypeSchema(definedType: Dict) -> None:
    """Validate a single type entry of the schema.

    Raises Exception with a descriptive message when a required key is
    missing or the kind is not supported.
    """
    allowedDefinedTypeKinds = ["enum", "struct"]
    if "name" not in definedType:
        raise Exception("type lacks the 'name' key")
    name = definedType["name"]
    if "kind" not in definedType:
        raise Exception(f"type {name} lacks the 'kind' key")
    kind = definedType["kind"]
    if kind not in allowedDefinedTypeKinds:
        raise Exception(
            f"type {name} : kind {kind} is not allowed. "
            + f"It must be one of {allowedDefinedTypeKinds}"
        )

    if "fields" not in definedType:
        # f-prefix was missing here: the message printed a literal "{name}"
        raise Exception(f"type {name} lacks the 'fields' key")

    # generic check on all kinds of types
    fields = definedType["fields"]
    for field in fields:
        # check presence of 'name' BEFORE reading it (a previous revision
        # read field["name"] first and crashed with a KeyError instead of
        # reporting the actual problem)
        if "name" not in field:
            raise Exception(f"field in type {name} lacks the 'name' key")

    # fields in struct must have types
    if kind == "struct":
        for field in fields:
            fieldName = field["name"]
            if "type" not in field:
                raise Exception(
                    f"field {fieldName} in type {name} lacks the 'type' key"
                )
def CheckSchemaSchema(schema: Dict) -> None:
    """Validate the top-level schema dict, then every type it defines."""
    for requiredKey in ("root_name", "types"):
        if requiredKey not in schema:
            raise Exception(f"schema lacks the '{requiredKey}' key")
    for definedType in schema["types"]:
        CheckTypeSchema(definedType)
130 147
131 # def CreateAndCacheTypeObject(allTypes : Dict[str,Type], typeDict : Dict) -> None: 148 # def CreateAndCacheTypeObject(allTypes : Dict[str,Type], typeDict : Dict) -> None:
132 # """This does not set the dependentTypes field""" 149 # """This does not set the dependentTypes field"""
133 # typeName : str = typeDict['name'] 150 # typeName : str = typeDict['name']
134 # if typeName in allTypes: 151 # if typeName in allTypes:
135 # raise Exception(f'Type {typeName} is defined more than once!') 152 # raise Exception(f'Type {typeName} is defined more than once!')
136 # else: 153 # else:
137 # typeObject = Type(typeName, typeDict['kind']) 154 # typeObject = Type(typeName, typeDict['kind'])
138 # allTypes[typeName] = typeObject 155 # allTypes[typeName] = typeObject
139 156
def EatToken(sentence: str) -> Tuple[str, str]:
    """Split "A,B,C" into ("A", "B,C") where A, B and C are type names
    (including templates) like "int32", "TotoTutu", or
    "map<map<int32,vector<string>>,map<string,int32>>".

    Only a comma at template depth 0 is a split point; commas nested
    inside <...> belong to the first token.

    Raises when the < and > counts do not match.
    """
    if sentence.count("<") != sentence.count(">"):
        raise Exception(
            f"Error in the partial template type list {sentence}."
            + " The number of < and > do not match!"
        )

    # the template level we're currently in
    templateLevel = 0
    for i, character in enumerate(sentence):
        if character == "," and templateLevel == 0:
            return (sentence[:i], sentence[i + 1 :])
        elif character == "<":
            templateLevel += 1
        elif character == ">":
            templateLevel -= 1
    # no top-level comma: the whole sentence is a single token
    return (sentence, "")
def SplitListOfTypes(typeName: str) -> List[str]:
    """Splits something like
    vector<string>,int32,map<string,map<string,int32>>
    in:
    - vector<string>
    - int32
    - map<string,map<string,int32>>

    This is not possible with a regex, so we eat tokens one at a time.
    """
    tokenList = []
    restOfString = typeName
    while True:
        firstToken, restOfString = EatToken(restOfString)
        tokenList.append(firstToken)
        if restOfString == "":
            break
    return tokenList
# matches SOMETHING<INNER>: group 1 is the template name, group 2 the
# (possibly nested) argument list. The previous pattern duplicated the
# name character class ([a-zA-Z0-9_]*[a-zA-Z0-9_]*), which is redundant.
templateRegex = re.compile(r"([a-zA-Z0-9_]*)<([a-zA-Z0-9_,:<>]+)>")


def ParseTemplateType(typeName) -> Tuple[bool, str, List[str]]:
    """If the type is a template like "SOMETHING<SOME<THING,EL<SE>>>", then
    it returns (True, "SOMETHING", ["SOME<THING,EL<SE>>"]),
    otherwise it returns (False, "", "")."""

    # let's remove all whitespace from the type
    # split without argument uses any whitespace string as separator
    # (space, tab, newline, return or formfeed)
    typeName = "".join(typeName.split())
    matches = templateRegex.match(typeName)
    if matches is None:  # 'is None', not '== None': identity test
        return (False, "", "")
    else:
        # we need to split with the commas that are outside of the defined
        # types; simply splitting at commas won't work
        listOfDependentTypes = SplitListOfTypes(matches.group(2))
        return (True, matches.group(1), listOfDependentTypes)
201 # def GetPrimitiveType(typeName : str) -> Type: 224 # def GetPrimitiveType(typeName : str) -> Type:
202 # if typeName in allTypes: 225 # if typeName in allTypes:
203 # return allTypes[typeName] 226 # return allTypes[typeName]
204 # else: 227 # else:
209 # # there are no dependent types in a primitive type --> Type object 232 # # there are no dependent types in a primitive type --> Type object
210 # # constrution is finished at this point 233 # # constrution is finished at this point
211 # allTypes[typeName] = typeObject 234 # allTypes[typeName] = typeObject
212 # return typeObject 235 # return typeObject
213 236
237
def ProcessTypeTree(
    ancestors: List[str],
    genOrderQueue: List[str],
    structTypes: Dict[str, Dict],
    typeName: str,
) -> None:
    """Walk the dependency tree of typeName depth-first so every struct it
    depends on is queued for generation before it.

    Raises on a cyclic dependency chain between structs.
    """
    if typeName in ancestors:
        raise Exception(
            f"Cyclic dependency chain found: the last of {ancestors} "
            + f"depends on {typeName} that is already in the list."
        )

    if typeName in genOrderQueue:
        # already scheduled for generation: nothing left to do
        return

    # if we reach this point, it means the type is NOT a struct or an enum.
    # it is another (non directly user-defined) type that we must parse and
    # create. Let's do it!
    (isTemplate, _, dependentTypeNames) = ParseTemplateType(typeName)
    if isTemplate:
        # NOTE: templates deliberately do NOT become ancestors
        for dependentTypeName in dependentTypeNames:
            ProcessTypeTree(
                ancestors, genOrderQueue, structTypes, dependentTypeName
            )
    elif typeName in structTypes:
        ProcessStructType_DepthFirstRecursive(
            genOrderQueue, structTypes, structTypes[typeName]
        )
268
def ProcessStructType_DepthFirstRecursive(
    genOrderQueue: List[str], structTypes: Dict[str, Dict], typeDict: Dict
) -> None:
    """Append typeDict (a struct) to genOrderQueue AFTER every struct its
    fields transitively depend on."""
    typeName: str = typeDict["name"]
    if typeDict["kind"] != "struct":
        # the second literal was missing its f-prefix and printed a
        # literal "{typeName}" in the message
        raise Exception(
            f"Unexpected kind '{typeDict['kind']}' for type '{typeName}'"
        )
    typeFields: List[Dict] = typeDict["fields"]
    for typeField in typeFields:
        ancestors = [typeName]
        ProcessTypeTree(ancestors, genOrderQueue, structTypes, typeField["type"])
    # now we're pretty sure our dependencies have been processed,
    # we can start marking our code for generation (it might already have
    # been done if someone referenced us earlier)
    if typeName not in genOrderQueue:
        genOrderQueue.append(typeName)
def ProcessEnumerationType(
    outputStreams: Dict[str, StringIO], typeDict) -> None:
    """Write the TypeScript and C++ declarations of an enumeration type to
    the 'ts' and 'cpp' output streams."""
    # previous revision logged "struct" here, which was misleading
    print(f"About to process enumeration: {typeDict['name']}")
    # previous annotations said io.StringIO, but 'io' is never imported
    tsText: StringIO = StringIO()
    cppText: StringIO = StringIO()

    tsText.write("enum %s\n" % typeDict['name'])
    tsText.write("{\n")

    cppText.write("enum %s\n" % typeDict['name'])
    cppText.write("{\n")

    fields = typeDict['fields']
    for i, field in enumerate(fields):
        name = field['name']
        # every enumerator except the last one is followed by a comma
        separator = "," if i < len(fields) - 1 else ""
        tsText.write("  %s%s\n" % (name, separator))
        cppText.write("  %s%s\n" % (name, separator))

    tsText.write("};\n\n")
    cppText.write("};\n\n")

    outputStreams['ts'].write(tsText.getvalue())
    outputStreams['cpp'].write(cppText.getvalue())
def ProcessStructType(
    outputStreams: Dict[str, StringIO], typeDict) -> None:
    """Write the TypeScript class and C++ struct declarations for a struct
    type to the 'ts' and 'cpp' output streams."""
    print(f"About to process struct: {typeDict['name']}")
    # previous annotations said io.StringIO, but 'io' is never imported
    tsText: StringIO = StringIO()
    cppText: StringIO = StringIO()

    tsText.write("class %s\n" % typeDict['name'])
    tsText.write("{\n")

    cppText.write("struct %s\n" % typeDict['name'])
    cppText.write("{\n")

    for field in typeDict['fields']:
        name = field['name']
        tsType = GetTypeScriptTypeNameFromCanonical(field['type'])
        tsText.write("  public %s %s;\n" % (tsType, name))
        cppType = GetCppTypeNameFromCanonical(field['type'])
        cppText.write("  %s %s;\n" % (cppType, name))

    tsText.write("};\n\n")
    cppText.write("};\n\n")

    outputStreams['ts'].write(tsText.getvalue())
    outputStreams['cpp'].write(cppText.getvalue())
def WritePreambles(outputStreams: Dict[str, StringIO]) -> None:
    """Write the 'autogenerated' banner (plus the C++ includes) at the top
    of both output streams."""
    timestamp = time.ctime()
    cppPreamble = (
        "// autogenerated by stonegentool on %s\n"
        "#include <cstdint>\n"
        "#include <string>\n"
        "#include <vector>\n"
        "#include <map>\n"
    ) % timestamp
    tsPreamble = "// autogenerated by stonegentool on %s\n" % timestamp

    outputStreams["cpp"].write(cppPreamble)
    outputStreams["ts"].write(tsPreamble)
def ProcessSchema(schema: dict) -> Tuple[str, Dict[str, StringIO]]:
    """Validate the schema, then generate the TS and C++ code for every
    enum and struct it defines.

    Returns (root_name, {"cpp": StringIO, "ts": StringIO}).
    """
    CheckSchemaSchema(schema)
    rootName: str = schema["root_name"]
    definedTypes: list = schema["types"]

    # a stray 'f' was printed inside the previous message ("rootName = fX")
    print(f"Processing schema. rootName = {rootName}")
    # this will be filled with the generation queue. That is, the type
    # names in the order where they must be defined.
    genOrderQueue: List = []

    # the struct names are mapped to their JSON dictionary
    structTypes: Dict[str, Dict] = {}

    outputStreams = {
        "cpp": StringIO(),
        "ts": StringIO(),
    }

    WritePreambles(outputStreams)

    # the order here is the generation order
    for definedType in definedTypes:
        if definedType["kind"] == "enum":
            ProcessEnumerationType(outputStreams, definedType)

    for definedType in definedTypes:
        if definedType["kind"] == "struct":
            structTypes[definedType["name"]] = definedType

    # the order here is NOT the generation order: the types
    # will be processed according to their dependency graph
    for definedType in definedTypes:
        if definedType["kind"] == "struct":
            ProcessStructType_DepthFirstRecursive(
                genOrderQueue, structTypes, definedType
            )

    for typeName in genOrderQueue:
        ProcessStructType(outputStreams, structTypes[typeName])

    return (rootName, outputStreams)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        usage="""stonegentool.py [-h] [-o OUT_DIR] [-v] input_schemas
EXAMPLE: python command_gen.py -o "generated_files/" """
        + """ "mainSchema.json,App Specific Commands.json" """
    )
    parser.add_argument("input_schema", type=str, help="path to the schema file")
    parser.add_argument(
        "-o",
        "--out_dir",
        type=str,
        default=".",
        help="""path of the directory where the files
                will be generated. Default is current
                working folder""",
    )
    parser.add_argument(
        "-v",
        "--verbosity",
        action="count",
        default=0,
        help="""increase output verbosity (0 == errors
                only, 1 == some verbosity, 2 == nerd
                mode""",
    )

    args = parser.parse_args()
    inputSchemaFilename = args.input_schema
    outDir = args.out_dir

    print("input schema = " + str(inputSchemaFilename))
    print("out dir = " + str(outDir))

    (rootName, outputStreams) = ProcessSchema(LoadSchema(inputSchemaFilename))

    # WriteStreamsToFiles() is not defined anywhere yet ("No file write yet"
    # per the commit message), so the previous revision crashed here with a
    # NameError. Until file writing is implemented, dump the generated code
    # to stdout so the tool remains usable end-to-end.
    # TODO(review): implement WriteStreamsToFiles(rootName, outputStreams)
    # writing into outDir.
    for extension, stream in outputStreams.items():
        print(f"--- generated {extension} code ---")
        print(stream.getvalue())
358 439
###################
##     ATTIC     ##
###################

# this works

if False:
    # dead scratch code kept as a json.loads usage example; never executed
    sampleJson = """{
    "firstName": "Alice",
    "lastName": "Hall",
    "age": 35
    }"""
    obj = json.loads(sampleJson)
    print(obj)
455