comparison Resources/CodeGeneration/stonegentool.py @ 485:772516adcbf6 bgo-commands-codegen

Ongoing work on code generation. Enums and structs OK in ts and cpp
author bgo-osimis
date Fri, 15 Feb 2019 12:07:09 +0100
parents f58fe38c8c04
children 8e40355a172b
comparison 482:f58fe38c8c04 vs 485:772516adcbf6
@@ -1,9 +1,21 @@
 import json
 import re
 import sys
-from typing import Dict, List, Set
+from typing import (
+    Any,
+    Dict,
+    Generator,
+    Iterable,
+    Iterator,
+    List,
+    Match,
+    Optional,
+    Tuple,
+    Union,
+    cast,
+)
 from io import StringIO
 import time

 """
 1 2 3 4 5 6 7
@@ -24,14 +36,14 @@

         Comments are not allowed in JSON but, i.e., Orthanc configuration files
         contains C++ like comments that we need to remove before python can
         parse the file
         """
-        # remove all occurence streamed comments (/*COMMENT */) from string
+        # remove all occurrence streamed comments (/*COMMENT */) from string
         string = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "", string)

-        # remove all occurence singleline comments (//COMMENT\n ) from string
+        # remove all occurrence singleline comments (//COMMENT\n ) from string
         string = re.sub(re.compile("//.*?\n"), "", string)

         return string

     @staticmethod
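
For reference, a minimal hand-written sketch of the comment-stripping technique used above; the sample input is invented and only the standard json and re modules are assumed:

    import json
    import re

    text = '{ /* block comment */ "Name": "Orthanc" // trailing comment\n }'
    text = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "", text)  # strip /* ... */ comments
    text = re.sub(re.compile("//.*?\n"), "", text)               # strip // ... comments
    print(json.loads(text))  # {'Name': 'Orthanc'}
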
@@ -70,31 +82,31 @@
     # C++: prefix map vector and string with std::map, std::vector and
     # std::string
     # replace int32 by int32_t
     # replace float32 by float
     # replace float64 by double
-    retVal = canonicalTypeName
-    retVal: str = retVal.replace("map", "std::map")
-    retVal: str = retVal.replace("vector", "std::vector")
-    retVal: str = retVal.replace("int32", "int32_t")
-    retVal: str = retVal.replace("float32", "float")
-    retVal: str = retVal.replace("float64", "double")
+    retVal: str = canonicalTypeName
+    retVal = retVal.replace("map", "std::map")
+    retVal = retVal.replace("vector", "std::vector")
+    retVal = retVal.replace("int32", "int32_t")
+    retVal = retVal.replace("float32", "float")
+    retVal = retVal.replace("float64", "double")
     return retVal

 def GetTypeScriptTypeNameFromCanonical(canonicalTypeName: str) -> str:
     # TS: replace vector with Array and map with Map
     # string remains string
     # replace int32 by number
     # replace float32 by number
     # replace float64 by number
-    retVal = canonicalTypeName
-    retVal: str = retVal.replace("map", "Map")
-    retVal: str = retVal.replace("vector", "Array")
-    retVal: str = retVal.replace("int32", "number")
-    retVal: str = retVal.replace("float32", "number")
-    retVal: str = retVal.replace("float64", "number")
-    retVal: str = retVal.replace("bool", "boolean")
+    retVal: str = canonicalTypeName
+    retVal = retVal.replace("map", "Map")
+    retVal = retVal.replace("vector", "Array")
+    retVal = retVal.replace("int32", "number")
+    retVal = retVal.replace("float32", "number")
+    retVal = retVal.replace("float64", "number")
+    retVal = retVal.replace("bool", "boolean")
     return retVal


 # class Schema:
 #     def __init__(self, root_prefix : str, defined_types : List[Type]):
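
To make the substitutions above concrete, a small hand-written example (the canonical type name is invented, not taken from a real schema):

    canonical = "map<int32,vector<float64>>"
    GetCppTypeNameFromCanonical(canonical)         # -> "std::map<int32_t,std::vector<double>>"
    GetTypeScriptTypeNameFromCanonical(canonical)  # -> "Map<number,Array<number>>"

These are plain substring replacements; note that in this changeset the C++ helper does not appear to rewrite "string" to "std::string" yet, although its leading comment announces it.
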
@@ -153,11 +165,11 @@
 # else:
 #     typeObject = Type(typeName, typeDict['kind'])
 #     allTypes[typeName] = typeObject


-def EatToken(sentence: str) -> (str, str):
+def EatToken(sentence: str) -> Tuple[str, str]:
     """splits "A,B,C" into "A" and "B,C" where A, B and C are type names
     (including templates) like "int32", "TotoTutu", or
     "map<map<int32,vector<string>>,map<string,int32>>" """

     if sentence.count("<") != sentence.count(">"):
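
The body of EatToken is elided from this comparison, but its docstring implies behaviour along these lines (hand-written, hypothetical calls):

    EatToken("int32,string,TotoTutu")
    # -> ("int32", "string,TotoTutu")
    EatToken("map<string,int32>,vector<string>")
    # -> ("map<string,int32>", "vector<string>")   # commas nested in <...> are not split points
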
@@ -200,27 +212,29 @@


 templateRegex = re.compile(r"([a-zA-Z0-9_]*[a-zA-Z0-9_]*)<([a-zA-Z0-9_,:<>]+)>")


-def ParseTemplateType(typeName) -> (bool, str, List[str]):
+def ParseTemplateType(typeName) -> Tuple[bool, str, List[str]]:
     """ If the type is a template like "SOMETHING<SOME<THING,EL<SE>>>", then
     it returns (true,"SOMETHING","SOME<THING,EL<SE>>")
     otherwise it returns (false,"","")"""

     # let's remove all whitespace from the type
     # split without argument uses any whitespace string as separator
     # (space, tab, newline, return or formfeed)
     typeName = "".join(typeName.split())
     matches = templateRegex.match(typeName)
     if matches == None:
-        return (False, "", "")
+        return (False, "", [])
     else:
+        m = cast(Match[str], matches)
+        assert(len(m.groups()) == 2)
         # we need to split with the commas that are outside of the defined types
         # simply splitting at commas won't work
-        listOfDependentTypes = SplitListOfTypes(matches.group(2))
-        return (True, matches.group(1), listOfDependentTypes)
+        listOfDependentTypes = SplitListOfTypes(m.group(2))
+        return (True, m.group(1), listOfDependentTypes)


 # def GetPrimitiveType(typeName : str) -> Type:
 #     if typeName in allTypes:
 #         return allTypes[typeName]
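
Based on the docstring and on templateRegex, the intended behaviour of ParseTemplateType is roughly the following (hand-written, hypothetical calls):

    ParseTemplateType("map<string,vector<int32>>")
    # -> (True, "map", ["string", "vector<int32>"])   # outer template name + dependent types
    ParseTemplateType("int32")
    # -> (False, "", [])                              # not a template type
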
@@ -284,14 +298,13 @@
     # been done if someone referenced us earlier)
     if not typeName in genOrderQueue:
         genOrderQueue.append(typeName)

 def ProcessEnumerationType(
-        outputStreams: Dict[str, StringIO], typeDict) -> None:
-    print(f"About to process struct: {typeDict['name']}")
-    tsText : io.StringIO = StringIO()
-    cppText : io.StringIO = StringIO()
+        outputStreams: GeneratedCode, typeDict: Dict) -> None:
+    tsText : StringIO = StringIO()
+    cppText : StringIO = StringIO()

     tsText.write("enum %s\n" % typeDict['name'])
     tsText.write("{\n")

     cppText.write("enum %s\n" % typeDict['name'])
@@ -317,14 +330,13 @@
     outputStreams['ts'].write(tsText.getvalue())
     outputStreams['cpp'].write(cppText.getvalue())


 def ProcessStructType(
-        outputStreams: Dict[str, StringIO], typeDict) -> None:
-    print(f"About to process struct: {typeDict['name']}")
-    tsText : io.StringIO = StringIO()
-    cppText : io.StringIO = StringIO()
+        outputStreams: GeneratedCode, typeDict) -> None:
+    tsText : StringIO = StringIO()
+    cppText : StringIO = StringIO()

     tsText.write("class %s\n" % typeDict['name'])
     tsText.write("{\n")

     cppText.write("struct %s\n" % typeDict['name'])
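
Judging only from the write calls visible in ProcessEnumerationType and ProcessStructType, the emitted text opens like this (member generation happens in lines elided from this comparison):

    # for typeDict['name'] == "Message" (an invented example name):
    #   tsText starts with  "class Message\n{\n"   (or "enum Message\n{\n" for enums)
    #   cppText starts with "struct Message\n"     and is closed later with "};\n\n"
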
@@ -342,39 +354,54 @@
     cppText.write("};\n\n")

     outputStreams['ts'].write(tsText.getvalue())
     outputStreams['cpp'].write(cppText.getvalue())

-def WritePreambles(outputStreams: Dict[str, StringIO]) -> None:
-    outputStreams["cpp"].write("""// autogenerated by stonegentool on %s
+
+def WritePreambles(rootName: str, outputStreams: GeneratedCode) -> None:
+    outputStreams.cppPreamble.write("""// autogenerated by stonegentool on %s for module %s
 #include <cstdint>
 #include <string>
 #include <vector>
 #include <map>
-""" % time.ctime())
+""" % (time.ctime(),rootName))

-    outputStreams["ts"].write("""// autogenerated by stonegentool on %s
-""" % time.ctime())
+    outputStreams.tsPreamble.write("""// autogenerated by stonegentool on %s for module %s
+""" % (time.ctime(),rootName))

-def ProcessSchema(schema: dict) -> (str, Dict[str, StringIO]):
+class GeneratedCode:
+    def __init__(self):
+        self.cppPreamble = StringIO() # file-wide preamble (#include directives, comment...)
+        self.cppEnums = StringIO()
+        self.cppStructs = StringIO()
+        self.cppDispatcher = StringIO()
+        self.cppHandler = StringIO()
+
+        self.tsPreamble = StringIO() # file-wide preamble (module directives, comment...)
+        self.tsEnums = StringIO()
+        self.tsStructs = StringIO()
+        self.tsDispatcher = StringIO()
+        self.tsHandler = StringIO()
+
+    def FlattenToFiles(self,outputDir: str):
+        raise NotImplementedError()
+
+def ProcessSchema(schema: dict) -> Tuple[str, GeneratedCode, List[str]]:
     CheckSchemaSchema(schema)
     rootName: str = schema["root_name"]
     definedTypes: list = schema["types"]

-    print(f"Processing schema. rootName = f{rootName}")
     # this will be filled with the generation queue. That is, the type
     # names in the order where they must be defined.
     genOrderQueue: List = []

     # the struct names are mapped to their JSON dictionary
     structTypes: Dict[str, Dict] = {}

-    outputStreams = {}
-    outputStreams["cpp"] = StringIO()
-    outputStreams["ts"] = StringIO()
-
-    WritePreambles(outputStreams)
+    outputStreams : GeneratedCode = GeneratedCode()
+
+    WritePreambles(rootName, outputStreams)

     # the order here is the generation order
     for definedType in definedTypes:
         if definedType["kind"] == "enum":
             ProcessEnumerationType(outputStreams, definedType)
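
For context, a minimal schema sketch that ProcessSchema could accept, inferred only from the fields read in this excerpt (root_name, types, kind, name); the values are invented and the per-type fields handled by elided code (enum values, struct members) are omitted:

    exampleSchema = {
        "root_name": "ExampleModule",
        "types": [
            {"kind": "enum",   "name": "Color"},    # handled by ProcessEnumerationType
            {"kind": "struct", "name": "Message"},  # queued and handled by ProcessStructType below
        ],
    }
    # rootName, generatedCode, genOrderQueue = ProcessSchema(exampleSchema)
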
@@ -394,62 +421,47 @@
     for i in range(len(genOrderQueue)):
         typeName = genOrderQueue[i]
         typeDict = structTypes[typeName]
         ProcessStructType(outputStreams, typeDict)

-    return (rootName, outputStreams)
+    return (rootName, outputStreams, genOrderQueue)

+
+
+def WriteStreamsToFiles(rootName: str, outputStreams: Dict[str, StringIO]) -> None:
+    pass

 if __name__ == "__main__":
     import argparse

     parser = argparse.ArgumentParser(
         usage="""stonegentool.py [-h] [-o OUT_DIR] [-v] input_schemas
     EXAMPLE: python command_gen.py -o "generated_files/" """
         + """ "mainSchema.json,App Specific Commands.json" """
     )
     parser.add_argument("input_schema", type=str, help="path to the schema file")
     parser.add_argument(
         "-o",
         "--out_dir",
         type=str,
         default=".",
         help="""path of the directory where the files
         will be generated. Default is current
         working folder""",
     )
     parser.add_argument(
         "-v",
         "--verbosity",
         action="count",
         default=0,
         help="""increase output verbosity (0 == errors
         only, 1 == some verbosity, 2 == nerd
         mode""",
     )

     args = parser.parse_args()
     inputSchemaFilename = args.input_schema
     outDir = args.out_dir

-    print("input schema = " + str(inputSchemaFilename))
-    print("out dir = " + str(outDir))
+    (rootName, outputStreams, _) = ProcessSchema(LoadSchema(inputSchemaFilename))
+    WriteStreamsToFiles(rootName, outputStreams)

-    (rootName, outputStreams) = ProcessSchema(LoadSchema(inputSchemaFilename))
-    WriteStreamsToFiles(rootName, outputStreams)
-
-###################
-## ATTIC ##
-###################
-
-# this works
-
-if False:
-    obj = json.loads(
-        """{
-            "firstName": "Alice",
-            "lastName": "Hall",
-            "age": 35
-        }"""
-    )
-    print(obj)
-
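
The usage string above implies an invocation like the following (file names hypothetical):

    python stonegentool.py -o generated_files/ mainSchema.json

In this work-in-progress changeset such a run would not yet produce the generated files: WriteStreamsToFiles is still a stub, GeneratedCode.FlattenToFiles raises NotImplementedError, and the enum/struct emitters still use the old dict-style outputStreams['ts'] / outputStreams['cpp'] access.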