comparison Deprecated/Resources/CodeGeneration/stonegentool.py @ 1401:f6a2d46d2b76
moved CodeGeneration into Deprecated
author    Alain Mazy <alain@mazy.be>
date      Wed, 29 Apr 2020 20:48:18 +0200
parents   Resources/CodeGeneration/stonegentool.py@1b47f17863ba
children
comparing 1400:419d0320c344 with 1401:f6a2d46d2b76
import json
import yaml
import re
import os
import sys
from jinja2 import Template
from io import StringIO
import time
import datetime
import yamlloader

"""
         1         2         3         4         5         6         7
12345678901234567890123456789012345678901234567890123456789012345678901234567890
"""

# see https://stackoverflow.com/a/2504457/2927708
def trim(docstring):
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count):
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)

class JsonHelpers:
    """A set of utilities to perform JSON operations"""

    @staticmethod
    def removeCommentsFromJsonContent(string):
        """
        Remove comments from a JSON file

        Comments are not allowed in JSON but, e.g., Orthanc configuration files
        contain C++-like comments that we need to remove before Python can
        parse the file
        """
        # remove all occurrences of streamed comments (/* COMMENT */) from string
        string = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", string)

        # remove all occurrences of single-line comments (// COMMENT\n) from string
        string = re.sub(re.compile(r"//.*?\n"), "", string)

        return string

    @staticmethod
    def loadJsonWithComments(path):
        """
        Reads a JSON file that may contain C++ like comments
        """
        with open(path, "r") as fp:
            fileContent = fp.read()
        fileContent = JsonHelpers.removeCommentsFromJsonContent(fileContent)
        return json.loads(fileContent)

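# JsonHelpers.removeCommentsFromJsonContent example (illustrative input):
#   '{ "Port": 8042, /* block comment */ "Verbose": true // line comment\n }'
#   becomes (modulo whitespace)
#   '{ "Port": 8042, "Verbose": true }'
# which json.loads can then parse.
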
class FieldDefinition:

    def __init__(self, name: str, type: str, defaultValue: str):
        self.name = name
        self.type = type
        self.defaultValue = defaultValue

    @staticmethod
    def fromKeyValue(key: str, value: str):

        if "=" in value:
            splitValue = value.split(sep="=")
            type = splitValue[0].strip(" ")
            defaultValue = splitValue[1].strip(" ")
        else:
            type = value
            defaultValue = None

        return FieldDefinition(name=key, type=type, defaultValue=defaultValue)

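# FieldDefinition.fromKeyValue examples, following the "name: type [= default]"
# convention used in the schema files:
#   FieldDefinition.fromKeyValue("delay", "uint32 = 1000")
#       -> name="delay", type="uint32", defaultValue="1000"
#   FieldDefinition.fromKeyValue("tags", "vector<string>")
#       -> name="tags", type="vector<string>", defaultValue=None
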
def LoadSchemaFromJson(filePath):
    return JsonHelpers.loadJsonWithComments(filePath)

def CanonToCpp(canonicalTypename):
    # C++: prefix map, vector and string with std:: (std::map, std::vector
    # and std::string)
    # replace int32... by int32_t...
    # replace float32 by float
    # replace float64 by double
    retVal = canonicalTypename
    retVal = retVal.replace("map", "std::map")
    retVal = retVal.replace("vector", "std::vector")
    retVal = retVal.replace("set", "std::set")
    retVal = retVal.replace("string", "std::string")
    # uint32 and uint64 are handled by the int32 and int64 replacements below
    # (because search and replace are done on partial words)
    retVal = retVal.replace("int32", "int32_t")
    retVal = retVal.replace("int64", "int64_t")
    retVal = retVal.replace("float32", "float")
    retVal = retVal.replace("float64", "double")
    retVal = retVal.replace("json", "Json::Value")
    return retVal

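# CanonToCpp example (plain textual substitution, which is why uint32/uint64
# also end up with the _t suffix):
#   CanonToCpp("map<string,vector<uint32>>")
#       -> "std::map<std::string,std::vector<uint32_t>>"
#   CanonToCpp("json") -> "Json::Value"
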
def CanonToTs(canonicalTypename):
    # TS: replace vector with Array and map with Map
    # string remains string
    # replace int32... by number
    # replace float32... by number
    retVal = canonicalTypename
    retVal = retVal.replace("map", "Map")
    retVal = retVal.replace("vector", "Array")
    retVal = retVal.replace("set", "Set")
    retVal = retVal.replace("uint32", "number")
    retVal = retVal.replace("uint64", "number")
    retVal = retVal.replace("int32", "number")
    retVal = retVal.replace("int64", "number")
    retVal = retVal.replace("float32", "number")
    retVal = retVal.replace("float64", "number")
    retVal = retVal.replace("bool", "boolean")
    retVal = retVal.replace("json", "Object")
    return retVal

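# CanonToTs example:
#   CanonToTs("map<string,vector<uint32>>") -> "Map<string,Array<number>>"
#   CanonToTs("bool") -> "boolean"
#   CanonToTs("json") -> "Object"
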
def NeedsTsConstruction(enums, tsType):
    if tsType == 'boolean':
        return False
    elif tsType == 'number':
        return False
    elif tsType == 'string':
        return False
    else:
        enumNames = []
        for enum in enums:
            enumNames.append(enum['name'])
        if tsType in enumNames:
            return False
    return True

def NeedsCppConstruction(canonTypename):
    return False

def DefaultValueToTs(enums, field: FieldDefinition):
    tsType = CanonToTs(field.type)

    enumNames = []
    for enum in enums:
        enumNames.append(enum['name'])

    if tsType in enumNames:
        return tsType + "." + field.defaultValue
    else:
        return field.defaultValue

def DefaultValueToCpp(root, enums, field: FieldDefinition):
    cppType = CanonToCpp(field.type)

    enumNames = []
    for enum in enums:
        enumNames.append(enum['name'])

    if cppType in enumNames:
        return root + "::" + cppType + "_" + field.defaultValue
    else:
        return field.defaultValue

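# DefaultValueToTs / DefaultValueToCpp example: with an enum named "Color" in
# the schema and a field declared as "color: Color = Red", i.e.
# field = FieldDefinition(name="color", type="Color", defaultValue="Red"):
#   DefaultValueToTs(enums, field)           -> "Color.Red"
#   DefaultValueToCpp("MyApp", enums, field) -> "MyApp::Color_Red"
# ("MyApp" is an illustrative stand-in for the schema rootName.)
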
def RegisterTemplateFunction(template, func):
    """Makes a function callable by a jinja2 template"""
    template.globals[func.__name__] = func
    return func

def MakeTemplate(templateStr):
    template = Template(templateStr)
    RegisterTemplateFunction(template, CanonToCpp)
    RegisterTemplateFunction(template, CanonToTs)
    RegisterTemplateFunction(template, NeedsTsConstruction)
    RegisterTemplateFunction(template, NeedsCppConstruction)
    RegisterTemplateFunction(template, DefaultValueToTs)
    RegisterTemplateFunction(template, DefaultValueToCpp)
    return template

def MakeTemplateFromFile(templateFileName):

    with open(templateFileName, "r") as templateFile:
        templateFileContents = templateFile.read()
        return MakeTemplate(templateFileContents)

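# Minimal sketch of how the registered helpers can be called from jinja2; the
# real templates live in template.in.ts.j2 and template.in.h.j2 next to this
# script:
#   t = MakeTemplate("using Ports = {{ CanonToCpp('map<string,uint32>') }};")
#   t.render() -> "using Ports = std::map<std::string,uint32_t>;"
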
def EatToken(sentence):
    """splits "A,B,C" into "A" and "B,C" where A, B and C are type names
    (including templates) like "int32", "TotoTutu", or
    "map<map<int32,vector<string>>,map<string,int32>>" """

    if sentence.count("<") != sentence.count(">"):
        raise Exception(
            "Error in the partial template type list " + str(sentence) + "."
            + " The number of < and > do not match!"
        )

    # the template level we're currently in
    templateLevel = 0
    for i in range(len(sentence)):
        if (sentence[i] == ",") and (templateLevel == 0):
            return (sentence[0:i], sentence[i + 1:])
        elif sentence[i] == "<":
            templateLevel += 1
        elif sentence[i] == ">":
            templateLevel -= 1
    return (sentence, "")

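# EatToken examples:
#   EatToken("vector<string>,int32,map<string,int32>")
#       -> ("vector<string>", "int32,map<string,int32>")
#   EatToken("int32") -> ("int32", "")
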
def SplitListOfTypes(typename):
    """Splits something like
    vector<string>,int32,map<string,map<string,int32>>
    into:
    - vector<string>
    - int32
    - map<string,map<string,int32>>

    This is not possible with a plain regex, so we do it by hand.
    """
    stillStuffToEat = True
    tokenList = []
    restOfString = typename
    while stillStuffToEat:
        firstToken, restOfString = EatToken(restOfString)
        tokenList.append(firstToken)
        if restOfString == "":
            stillStuffToEat = False
    return tokenList

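# SplitListOfTypes example:
#   SplitListOfTypes("vector<string>,int32,map<string,map<string,int32>>")
#       -> ["vector<string>", "int32", "map<string,map<string,int32>>"]
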

templateRegex = \
    re.compile(r"([a-zA-Z0-9_]*[a-zA-Z0-9_]*)<([a-zA-Z0-9_,:<>]+)>")


def ParseTemplateType(typename):
    """If the type is a template like "SOMETHING<SOME<THING,EL<SE>>>",
    then it returns (True, "SOMETHING", ["SOME<THING,EL<SE>>"]),
    otherwise it returns (False, "", [])"""

    # let's remove all whitespace from the type
    # split without argument uses any whitespace string as separator
    # (space, tab, newline, return or formfeed)
    typename = "".join(typename.split())
    matches = templateRegex.match(typename)
    if matches is None:
        return (False, "", [])
    else:
        m = matches
        assert len(m.groups()) == 2
        # we need to split with the commas that are outside of the
        # defined types. Simply splitting at commas won't work
        listOfDependentTypes = SplitListOfTypes(m.group(2))
        return (True, m.group(1), listOfDependentTypes)

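# ParseTemplateType examples:
#   ParseTemplateType("map<string, map<string,int32>>")
#       -> (True, "map", ["string", "map<string,int32>"])
#   ParseTemplateType("int32") -> (False, "", [])
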
def GetStructFields(struct):
    """This filters out the special metadata key from the struct fields"""
    # NOTE: this helper is redefined further down; the later definition (which
    # returns a dict of FieldDefinition objects) is the one in effect at run
    # time, and iterating over it still yields the field names.
    return [k for k in struct.keys() if k != '__handler']

def ComputeOrderFromTypeTree(
    ancestors,
    genOrder,
    shortTypename, schema):

    if shortTypename in ancestors:
        raise Exception(
            "Cyclic dependency chain found: the last of " + str(ancestors) +
            " depends on " + str(shortTypename) + " that is already in the list."
        )

    if not (shortTypename in genOrder):
        (isTemplate, _, dependentTypenames) = ParseTemplateType(shortTypename)
        if isTemplate:
            # if it is a template, it HAS dependent types... They can be
            # anything (primitive, collection, enum, structs..).
            # Let's process them!
            for dependentTypename in dependentTypenames:
                # childAncestors = ancestors.copy() NO TEMPLATE ANCESTOR!!!
                # childAncestors.append(typename)
                ComputeOrderFromTypeTree(
                    ancestors, genOrder, dependentTypename, schema
                )
        else:
            # If it is not a template, we are only interested if it is a
            # dependency that we must take into account in the dep graph,
            # i.e., a struct.
            if IsShortStructType(shortTypename, schema):
                struct = schema[GetLongTypename(shortTypename, schema)]
                # The keys in the struct dict are the member names
                # The values in the struct dict are the member types
                if struct:
                    # we reach this if struct is not None AND not empty
                    for field in GetStructFields(struct):
                        # we fill the chain of dependent types (starting here)
                        ancestors.append(shortTypename)
                        ComputeOrderFromTypeTree(
                            ancestors, genOrder, struct[field], schema)
                        # don't forget to restore it!
                        ancestors.pop()

                # now we're pretty sure our dependencies have been processed,
                # we can start marking our code for generation (it might
                # already have been done if someone referenced us earlier)
                if not shortTypename in genOrder:
                    genOrder.append(shortTypename)

# +-----------------------+
# |   Utility functions   |
# +-----------------------+

def IsShortStructType(typename, schema):
    fullStructName = "struct " + typename
    return (fullStructName in schema)

def GetLongTypename(shortTypename, schema):
    if shortTypename.startswith("enum "):
        raise RuntimeError('shortTypename.startswith("enum "):')
    enumName = "enum " + shortTypename
    isEnum = enumName in schema

    if shortTypename.startswith("struct "):
        raise RuntimeError('shortTypename.startswith("struct "):')
    structName = "struct " + shortTypename
    isStruct = structName in schema

    if isEnum and isStruct:
        raise RuntimeError('Enums and structs cannot have the same name')

    if isEnum:
        return enumName
    if isStruct:
        return structName

def IsTypename(fullName):
    return (fullName.startswith("enum ") or fullName.startswith("struct "))

def IsEnumType(fullName):
    return fullName.startswith("enum ")

def IsStructType(fullName):
    return fullName.startswith("struct ")

def GetShortTypename(fullTypename):
    if fullTypename.startswith("struct "):
        return fullTypename[7:]
    elif fullTypename.startswith("enum "):
        return fullTypename[5:]
    else:
        raise RuntimeError \
            ('fullTypename should start with either "struct " or "enum "')

def CheckSchemaSchema(schema):
    if not "rootName" in schema:
        raise Exception("schema lacks the 'rootName' key")
    for name in schema.keys():
        if (not IsEnumType(name)) and (not IsStructType(name)) and \
            (name != 'rootName'):
            raise RuntimeError \
                ('Type "' + str(name) + '" should start with "enum " or "struct "')

    # TODO: check enum fields are unique (in whole namespace)
    # TODO: check struct fields are unique (in each struct)
    # TODO: check that in the source schema, there are spaces after each colon

nonTypeKeys = ['rootName']
def GetTypesInSchema(schema):
    """Returns the top schema keys that are actual type names"""
    typeList = [k for k in schema if k not in nonTypeKeys]
    return typeList

# +-----------------------+
# | Main processing logic |
# +-----------------------+

def ComputeRequiredDeclarationOrder(schema):
    # sanity check
    CheckSchemaSchema(schema)

    # we traverse the type dependency graph and we fill a queue with
    # the required struct types, in a bottom-up fashion, to compute
    # the declaration order.
    # The genOrder list contains the struct short names in the order
    # in which they must be defined.
    # We do not care about the enums here... They do not depend upon
    # anything and we'll handle them, in their original declaration
    # order, at the start
    genOrder = []
    for fullName in GetTypesInSchema(schema):
        if IsStructType(fullName):
            realName = GetShortTypename(fullName)
            ancestors = []
            ComputeOrderFromTypeTree(ancestors, genOrder, realName, schema)
    return genOrder

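# Illustrative example: with a schema containing
#   struct A:
#     x: int32
#   struct B:
#     a: vector<A>
# ComputeRequiredDeclarationOrder returns ["A", "B"]: A is emitted before B
# because B depends on A through the vector<> template argument.
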
def GetStructFields(fieldDict):
    """Returns the regular (non __handler) struct fields"""
    # the following happens for empty structs
    if fieldDict is None:
        return fieldDict
    ret = {}
    for k, v in fieldDict.items():
        if k != "__handler":
            ret[k] = FieldDefinition.fromKeyValue(k, v)
        if k.startswith("__") and k != "__handler":
            raise RuntimeError("Fields starting with __ (double underscore) are reserved names!")
    return ret

def GetStructMetadata(fieldDict):
    """Returns the struct metadata derived from the __handler entry (these are
    default values that can be overridden by entries in the schema).
    Not tested because it's a fail-safe: if something is broken in this,
    dependent projects will not build."""
    metadataDict = {}
    metadataDict['handleInCpp'] = False
    metadataDict['handleInTypescript'] = False

    if fieldDict is not None:
        for k, v in fieldDict.items():
            if k.startswith("__") and k != "__handler":
                raise RuntimeError("Fields starting with __ (double underscore) are reserved names")
            if k == "__handler":
                if isinstance(v, list):
                    for i in v:
                        if i == "cpp":
                            metadataDict['handleInCpp'] = True
                        elif i == "ts":
                            metadataDict['handleInTypescript'] = True
                        else:
                            raise RuntimeError("Error in schema. Allowed values for __handler are \"cpp\" or \"ts\"")
                elif isinstance(v, str):
                    if v == "cpp":
                        metadataDict['handleInCpp'] = True
                    elif v == "ts":
                        metadataDict['handleInTypescript'] = True
                    else:
                        raise RuntimeError("Error in schema. Allowed values for __handler are \"cpp\" or \"ts\" (or a list of both)")
                else:
                    raise RuntimeError("Error in schema. Allowed values for __handler are \"cpp\" or \"ts\" (or a list of both)")
    return metadataDict

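# Example __handler declarations in a struct and the resulting metadata:
#   __handler: cpp        -> {'handleInCpp': True, 'handleInTypescript': False}
#   __handler: [cpp, ts]  -> {'handleInCpp': True, 'handleInTypescript': True}
# Without a __handler entry, both flags stay False.
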
def ProcessSchema(schema, genOrder):
    # sanity check
    CheckSchemaSchema(schema)

    # let's doctor the schema to clean it up a bit
    # order DOES NOT matter for enums, even though it's a list
    enums = []
    for fullName in schema.keys():
        if IsEnumType(fullName):
            # convert "enum Toto" to "Toto"
            typename = GetShortTypename(fullName)
            enum = {}
            enum['name'] = typename
            assert(type(schema[fullName]) == list)
            enum['fields'] = schema[fullName]  # must be a list
            enums.append(enum)

    # now that the order has been established, we actually store
    # the structs in the correct order
    # the structs are like:
    # example = [
    #     {
    #         "name": "Message1",
    #         "fields": {
    #             "someMember": "int32",
    #             "someOtherMember": "vector<string>"
    #         }
    #     },
    #     {
    #         "name": "Message2",
    #         "fields": {
    #             "someMember": "int32",
    #             "someOtherMember22": "vector<Message1>"
    #         }
    #     }
    # ]

    structs = []
    for i in range(len(genOrder)):
        # this is already the short name
        typename = genOrder[i]
        fieldDict = schema["struct " + typename]
        struct = {}
        struct['name'] = typename
        struct['fields'] = GetStructFields(fieldDict)
        struct['__meta__'] = GetStructMetadata(fieldDict)
        structs.append(struct)

    templatingDict = {}
    templatingDict['enums'] = enums
    templatingDict['structs'] = structs
    templatingDict['rootName'] = schema['rootName']

    return templatingDict

# +-----------------------+
# |    Write to files     |
# +-----------------------+

# def WriteStreamsToFiles(rootName: str, genc: Dict[str, StringIO]) \
#     -> None:
#     pass

def LoadSchema(fn):
    # latin-1 is a trick, when we do NOT care about NON-ascii chars but
    # we wish to avoid using a decoding error handler
    # (see http://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html#files-in-an-ascii-compatible-encoding-best-effort-is-acceptable)
    # TL;DR: all 256 values are mapped to characters in latin-1 so the file
    # contents never cause an error.
    with open(fn, 'r', encoding='latin-1') as f:
        schemaText = f.read()
        assert(type(schemaText) == str)
    return LoadSchemaFromString(schemaText = schemaText)

def LoadSchemaFromString(schemaText: str):
    # ensure there is a space after each colon. Otherwise, dicts could be
    # erroneously recognized as an array of strings containing ':'
    for i in range(len(schemaText) - 1):
        ch = schemaText[i]
        nextCh = schemaText[i + 1]
        if ch == ':':
            if not (nextCh == ' ' or nextCh == '\n'):
                lineNumber = schemaText.count("\n", 0, i) + 1
                raise RuntimeError("Error at line " + str(lineNumber) + " in the schema: colons must be followed by a space or a newline!")
    schema = yaml.load(schemaText, Loader = yamlloader.ordereddict.SafeLoader)
    return schema

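# Sketch of a schema fragment accepted by LoadSchemaFromString (note the
# mandatory space after each colon; the names below are purely illustrative):
#   rootName: MyApp
#
#   enum Color:
#     - Red
#     - Green
#
#   struct Message1:
#     __handler: [cpp, ts]
#     color: Color = Red
#     values: vector<int32>
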
def GetTemplatingDictFromSchemaFilename(fn):
    return GetTemplatingDictFromSchema(LoadSchema(fn))

def GetTemplatingDictFromSchema(schema):
    genOrder = ComputeRequiredDeclarationOrder(schema)
    templatingDict = ProcessSchema(schema, genOrder)
    currentDT = datetime.datetime.now()
    templatingDict['currentDatetime'] = str(currentDT)
    return templatingDict

# +-----------------------+
# |      ENTRY POINT      |
# +-----------------------+
def Process(schemaFile, outDir):
    tdico = GetTemplatingDictFromSchemaFilename(schemaFile)

    tsTemplateFile = \
        os.path.join(os.path.dirname(__file__), 'template.in.ts.j2')
    template = MakeTemplateFromFile(tsTemplateFile)
    renderedTsCode = template.render(**tdico)
    outputTsFile = os.path.join( \
        outDir, str(tdico['rootName']) + "_generated.ts")
    with open(outputTsFile, "wt", encoding='utf8') as outFile:
        outFile.write(renderedTsCode)

    cppTemplateFile = \
        os.path.join(os.path.dirname(__file__), 'template.in.h.j2')
    template = MakeTemplateFromFile(cppTemplateFile)
    renderedCppCode = template.render(**tdico)
    outputCppFile = os.path.join( \
        outDir, str(tdico['rootName']) + "_generated.hpp")
    with open(outputCppFile, "wt", encoding='utf8') as outFile:
        outFile.write(renderedCppCode)

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        usage="""stonegentool.py [-h] [-o OUT_DIR] [-v] input_schema
EXAMPLE: python stonegentool.py -o "generated_files/" """
        + """ "mainSchema.yaml,App Specific Commands.json" """
    )
    parser.add_argument("input_schema", type=str, \
        help="path to the schema file")
    parser.add_argument(
        "-o",
        "--out_dir",
        type=str,
        default=".",
        help="""path of the directory where the files
                will be generated. Default is the current
                working folder""",
    )
    parser.add_argument(
        "-v",
        "--verbosity",
        action="count",
        default=0,
        help="""increase output verbosity (0 == errors
                only, 1 == some verbosity, 2 == nerd
                mode)""",
    )

    args = parser.parse_args()
    schemaFile = args.input_schema
    outDir = args.out_dir
    Process(schemaFile, outDir)