471
|
1 import json
|
|
2 import re
|
473
|
3 import sys
|
|
4 from typing import Dict, List, Set
|
|
5
|
469
|
6
|
472
|
7 """
|
|
8 1 2 3 4 5 6 7
|
|
9 12345678901234567890123456789012345678901234567890123456789012345678901234567890
|
|
10 """
|
|
11
|
473
|
12 import json
|
|
13 import re
|
|
14
|
|
15 """A set of utilities to perform JSON operation"""
|
|
16
|
|
class JsonHelpers:
    """A set of utilities to read JSON that may contain C++-style comments."""

    @staticmethod
    def removeCommentsFromJsonContent(string):
        """
        Remove comments from a JSON string.

        Comments are not allowed in JSON but, i.e., Orthanc configuration
        files contain C++-like comments that we need to remove before Python
        can parse the file.
        """
        # remove all occurrences of streamed comments (/* COMMENT */).
        # raw strings are used for the patterns: "\*" in a plain string is an
        # invalid escape sequence (DeprecationWarning, future error).
        string = re.sub(r"/\*.*?\*/", "", string, flags=re.DOTALL)
        # remove all occurrences of single-line comments (// COMMENT\n)
        string = re.sub(r"//.*?\n", "", string)
        return string

    @staticmethod
    def loadJsonWithComments(path):
        """
        Read a JSON file that may contain C++-like comments and parse it.
        """
        with open(path, 'r') as fp:
            fileContent = fp.read()
        fileContent = JsonHelpers.removeCommentsFromJsonContent(fileContent)
        return json.loads(fileContent)
|
|
40
|
|
41
|
|
def LoadSchema(filePath : str):
    """Load and parse the schema file at filePath (comments are tolerated)."""
    schema = JsonHelpers.loadJsonWithComments(filePath)
    return schema
|
469
|
44
|
472
|
45 # class Type:
|
|
46 # def __init__(self, canonicalTypeName:str, kind:str):
|
|
47 # allowedTypeKinds = ["primitive","enum","struct","collection"]
|
|
48 # """dependent type is the list of canonical types this type depends on.
|
|
49 # For instance, vector<map<string,int32>> depends on map<string,int32>
|
|
50 # that, in turn, depends on string and int32 that, in turn, depend on
|
|
51 # nothing"""
|
|
52 # self.canonicalTypeName = canonicalTypeName
|
|
53 # assert(kind in allowedTypeKinds)
|
470
|
54
|
472
|
55 # def setDependentTypes(self, dependentTypes:List[Type]) -> None:
|
|
56 # self.dependentTypes = dependentTypes
|
470
|
57
|
472
|
58 # def getDependentTypes(self) -> List[Type]:
|
|
59 # return self.dependentTypes
|
470
|
60
|
472
|
def GetCppTypeNameFromCanonical(canonicalTypeName : str) -> str:
    """
    Translate a canonical type name into its C++ spelling.

    - map, vector and string are prefixed with std:: (std::map,
      std::vector, std::string)
    - int32 becomes int32_t
    - float32 becomes float
    - float64 becomes double
    """
    # NOTE: the replacements must be chained on the working string.  The
    # original restarted from canonicalTypeName each time, so only the very
    # last replacement ever took effect.  The "string" -> "std::string"
    # replacement announced in the original comment was also missing.
    retVal : str = canonicalTypeName.replace("map", "std::map")
    retVal = retVal.replace("vector", "std::vector")
    retVal = retVal.replace("string", "std::string")
    retVal = retVal.replace("int32", "int32_t")
    retVal = retVal.replace("float32", "float")
    retVal = retVal.replace("float64", "double")
    return retVal
|
470
|
73
|
472
|
def GetTypeScriptTypeNameFromCanonical(canonicalTypeName : str) -> str:
    """
    Translate a canonical type name into its TypeScript spelling.

    - vector becomes Array, map becomes Map
    - string remains string
    - int32, float32 and float64 all become number
    - bool becomes boolean
    """
    # NOTE: the replacements must be chained on the working string.  The
    # original restarted from canonicalTypeName each time, so only the very
    # last replacement ever took effect.
    retVal : str = canonicalTypeName.replace("map", "Map")
    retVal = retVal.replace("vector", "Array")
    retVal = retVal.replace("int32", "number")
    retVal = retVal.replace("float32", "number")
    retVal = retVal.replace("float64", "number")
    # "bool" is replaced last: "boolean" itself contains "bool"
    retVal = retVal.replace("bool", "boolean")
    return retVal
|
|
87
|
472
|
88 # class Schema:
|
|
89 # def __init__(self, root_prefix : str, defined_types : List[Type]):
|
|
90 # self.rootName : str = root_prefix
|
|
91 # self.definedTypes : str = defined_types
|
470
|
92
|
|
def CheckTypeSchema(definedType : Dict) -> None:
    """
    Validate one type entry of the schema.

    Raises an Exception with a diagnostic message when the entry is
    malformed: missing 'name', 'kind' or 'fields' key, unknown kind,
    field without a 'name', or struct field without a 'type'.
    """
    allowedDefinedTypeKinds = ["enum","struct"]
    if not 'name' in definedType:
        raise Exception("type lacks the 'name' key")
    name = definedType['name']
    if not 'kind' in definedType:
        raise Exception(f"type {name} lacks the 'kind' key")
    kind = definedType['kind']
    if not (kind in allowedDefinedTypeKinds):
        raise Exception(f"type {name} : kind {kind} is not allowed. " +
                        f"It must be one of {allowedDefinedTypeKinds}")

    if not 'fields' in definedType:
        # the f prefix was missing in the original, printing literal "{name}"
        raise Exception(f"type {name} lacks the 'fields' key")

    # generic check on all kinds of types: every field must be named.
    fields = definedType['fields']
    for field in fields:
        # check the key *before* reading it (the original read field['name']
        # first, turning a schema error into a bare KeyError)
        if not 'name' in field:
            raise Exception(f"field in type {name} lacks the 'name' key")

    # fields in struct must have types
    if kind == 'struct':
        for field in fields:
            fieldName = field['name']
            if not 'type' in field:
                raise Exception(f"field {fieldName} in type {name} "
                                + "lacks the 'type' key")
|
470
|
122
|
|
def CheckSchemaSchema(schema : Dict) -> None:
    """Validate the top-level structure of a schema dictionary, then each
    of the type entries it contains."""
    for requiredKey in ('root_name', 'types'):
        if requiredKey not in schema:
            raise Exception(f"schema lacks the '{requiredKey}' key")
    for typeEntry in schema['types']:
        CheckTypeSchema(typeEntry)
|
|
130
|
472
|
131 # def CreateAndCacheTypeObject(allTypes : Dict[str,Type], typeDict : Dict) -> None:
|
|
132 # """This does not set the dependentTypes field"""
|
|
133 # typeName : str = typeDict['name']
|
473
|
134 # if typeName in allTypes:
|
472
|
135 # raise Exception(f'Type {typeName} is defined more than once!')
|
|
136 # else:
|
|
137 # typeObject = Type(typeName, typeDict['kind'])
|
|
138 # allTypes[typeName] = typeObject
|
471
|
139
|
|
def EatToken(sentence : str) -> (str,str):
    """Splits "A,B,C" into "A" and "B,C" where A, B and C are type names
    (including templates) like "int32", "TotoTutu", or
    "map<map<int32,vector<string>>,map<string,int32>>" """

    if sentence.count('<') != sentence.count('>'):
        raise Exception(f"Error in the partial template type list {sentence}."
                        + " The number of < and > do not match!")

    # depth tracks how deeply we are nested in template brackets; only a
    # comma at depth 0 separates two top-level tokens
    depth = 0
    for position, character in enumerate(sentence):
        if character == ',' and depth == 0:
            return (sentence[:position], sentence[position + 1:])
        if character == '<':
            depth += 1
        elif character == '>':
            depth -= 1
    # no top-level comma found: the whole sentence is a single token
    return (sentence, "")
|
|
159
|
|
def SplitListOfTypes(typeName : str) -> List[str]:
    """Splits a comma-separated list of type names such as
    "vector<string>,int32,map<string,map<string,int32>>"
    into:
    - "vector<string>"
    - "int32"
    - "map<string,map<string,int32>>"

    Commas nested inside template brackets are not split points, which is
    why a plain regex or str.split cannot do this.
    """
    tokens : List[str] = []
    remainder = typeName
    while True:
        head, remainder = EatToken(remainder)
        tokens.append(head)
        if remainder == "":
            break
    return tokens
|
|
179
|
472
|
# matches TYPENAME<PARAMS>: group(1) is the (possibly empty) template name,
# group(2) the raw parameter list, which may itself contain nested templates
# and commas (split it with SplitListOfTypes, not on commas).
# The original pattern duplicated the name class ("[a-zA-Z0-9_]*[a-zA-Z0-9_]*"),
# which is redundant and equivalent to a single "[a-zA-Z0-9_]*".
templateRegex = \
    re.compile(r"([a-zA-Z0-9_]*)<([a-zA-Z0-9_,:<>]+)>")
|
|
182
|
471
|
def ParseTemplateType(typeName) -> (bool,str,List[str]):
    """ If the type is a template like "SOMETHING<SOME<THING,EL<SE>>>", then
    it returns (True, "SOMETHING", ["SOME<THING,EL<SE>>"]) — the third
    element is the *list* of the template's top-level parameters.
    Otherwise it returns (False,"","")"""

    # let's remove all whitespace from the type
    # split without argument uses any whitespace string as separator
    # (space, tab, newline, return or formfeed)
    typeName = "".join(typeName.split())
    matches = templateRegex.match(typeName)
    if matches is None:  # identity test; the original used "== None"
        return (False,"","")
    else:
        # we need to split at the commas that are outside of the nested
        # types; simply splitting at every comma won't work
        listOfDependentTypes = SplitListOfTypes(matches.group(2))
        return (True,matches.group(1),listOfDependentTypes)
|
470
|
200
|
472
|
201 # def GetPrimitiveType(typeName : str) -> Type:
|
473
|
202 # if typeName in allTypes:
|
472
|
203 # return allTypes[typeName]
|
|
204 # else:
|
|
205 # primitiveTypes = ['int32', 'float32', 'float64', 'string']
|
|
206 # if not (typeName in primitiveTypes):
|
|
207 # raise Exception(f"Type {typeName} is unknown.")
|
|
208 # typeObject = Type(typeName,'primitive')
|
|
209 # # there are no dependent types in a primitive type --> Type object
|
|
210 # # constrution is finished at this point
|
|
211 # allTypes[typeName] = typeObject
|
|
212 # return typeObject
|
470
|
213
|
471
|
def ProcessTypeTree(
      ancestors : List[str]
    , genOrderQueue : List[str]
    , structTypes : Dict[str,Dict], typeName : str) -> None:
    """
    Walk the dependency tree of typeName, queuing every struct it
    (transitively) depends on in genOrderQueue before the structs that
    use it.  ancestors is the chain of struct names currently being
    expanded, used to detect cyclic dependencies.
    """
    if typeName in ancestors:
        raise Exception(f"Cyclic dependency chain found: the last of {ancestors} "
                        + f"depends on {typeName} that is already in the list.")

    if not (typeName in genOrderQueue):
        # the type has not been queued yet: it is either a template
        # (non directly user-defined) type to recurse into, or a struct.
        (isTemplate,_,parameters) = ParseTemplateType(typeName)
        if isTemplate:
            # ParseTemplateType already returns the *list* of top-level
            # template parameters; the original ran SplitListOfTypes on
            # that list again, which crashed (it expects a string).
            for dependentTypeName in parameters:
                # template ancestors are deliberately not pushed:
                # only user-defined structs can create cycles
                ProcessTypeTree(ancestors, genOrderQueue,
                                structTypes, dependentTypeName)
        else:
            if typeName in structTypes:
                ProcessStructType_DepthFirstRecursive(genOrderQueue, structTypes,
                                                      structTypes[typeName])
|
470
|
238
|
472
|
def ProcessStructType_DepthFirstRecursive(
    genOrderQueue : List[str], structTypes : Dict[str,Dict]
    , typeDict : Dict) -> None:
    """
    Queue the dependencies of the struct described by typeDict, then the
    struct itself, in genOrderQueue (depth-first post-order).
    """
    typeName : str = typeDict['name']
    if typeDict['kind'] != 'struct':
        # the f prefix was missing on the second part of the message in the
        # original, which printed the literal text "{typeName}"
        raise Exception(f"Unexpected kind '{typeDict['kind']}' for " +
                        f"type '{typeName}'")
    typeFields : List[Dict] = typeDict['fields']
    for typeField in typeFields:
        ancestors = [typeName]
        # recurse on the field's *type* — the original passed the field
        # name ('typeField["name"]'), so dependencies were never resolved
        ProcessTypeTree(ancestors, genOrderQueue
                        , structTypes, typeField['type'])
    # now we're pretty sure our dependencies have been processed,
    # we can start marking our code for generation
    genOrderQueue.append(typeName)
|
|
255
|
|
def ProcessEnumerationType(processedTypes, definedType) -> None:
    """Placeholder: announce the enumeration that is about to be processed."""
    enumName = definedType['name']
    print(f"About to process enumeration: {enumName}")
|
470
|
258
|
|
def ProcessSchema(schema : dict) -> None:
    """
    Validate the schema, then mark its types for generation in an order
    compatible with their dependency graph (enums first, then structs
    depth-first).
    """
    CheckSchemaSchema(schema)
    rootName : str = schema['root_name']
    definedTypes : list = schema['types']

    # (the original message contained a stray literal "f" before the name)
    print(f"Processing schema. rootName = {rootName}")
    # this will be filled with the generation queue. That is, the type
    # names in the order where they must be defined.  It must be an
    # *ordered* container with append(): the original used a set, which
    # has no append() (AttributeError downstream) and keeps no order.
    genOrderQueue : List[str] = []

    # the struct names are mapped to their JSON dictionary
    structTypes : Dict[str,Dict] = {}

    # the order here is the generation order
    for definedType in definedTypes:
        if definedType['kind'] == 'enum':
            ProcessEnumerationType(genOrderQueue, definedType)

    # register ALL structs first so that a struct may reference another
    # struct defined later in the file (forward reference); the original
    # registered and processed in the same loop, which missed those.
    for definedType in definedTypes:
        if definedType['kind'] == 'struct':
            structTypes[definedType['name']] = definedType

    # the order here is NOT the generation order: the types
    # will be processed according to their dependency graph
    for definedType in definedTypes:
        if definedType['kind'] == 'struct':
            ProcessStructType_DepthFirstRecursive(genOrderQueue, structTypes,
                                                  definedType)

    print(f"genOrderQueue = {genOrderQueue}")
|
468
|
286
|
|
# Command-line entry point: parse the arguments, load the schema file
# (possibly containing C++-style comments) and process it.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        usage = """stonegentool.py [-h] [-o OUT_DIR] [-v] input_schemas
EXAMPLE: python command_gen.py -o "generated_files/" """
        + """ "mainSchema.json,App Specific Commands.json" """)
    parser.add_argument("input_schema", type=str,
                        help = "path to the schema file")
    parser.add_argument("-o", "--out_dir", type=str, default=".",
                        help = """path of the directory where the files
                        will be generated. Default is current
                        working folder""")
    # NOTE(review): the help text below lacks a closing ')' — confirm intended
    parser.add_argument("-v", "--verbosity", action="count", default=0,
                        help = """increase output verbosity (0 == errors
                        only, 1 == some verbosity, 2 == nerd
                        mode""")

    args = parser.parse_args()
    inputSchemaFilename = args.input_schema
    outDir = args.out_dir

    # echo the effective arguments before processing
    print("input schema = " + str(inputSchemaFilename))
    print("out dir = " + str(outDir))

    ProcessSchema(LoadSchema(inputSchemaFilename))
|
|
312
|
468
|
313
|
469
|
314 ###################
|
|
315 ## ATTIC ##
|
|
316 ###################
|
|
317
|
|
318 # this works
|
468
|
319
|
469
|
# Dead demonstration snippet (guarded by "if False:", never executed):
# shows that json.loads parses a plain JSON object literal.
if False:
    obj = json.loads("""{
        "firstName": "Alice",
        "lastName": "Hall",
        "age": 35
    }""")
    print(obj)
|