#########################################################
# Copyright (C) 2022 SiMa Technologies, Inc.
#
# This material is SiMa proprietary and confidential.
#
# This material may not be copied or distributed without
# the express prior written permission of SiMa.
#
# All rights reserved.
#########################################################
# Code owner: Stefan Jovic
#########################################################
import copy
import dataclasses
import json
import os
import pathlib
import sys
import subprocess
import tarfile
import tempfile
from afe.backends import Backend, BackendIR
from afe.backends.apu.tvm_apu_compiler import CompiledTVMObjectFile
from afe.backends.mla.afe_to_n2a_compiler.defines import L2CachingMode
from afe.backends.mpk.defines import (
PluginMPKData, MLAConfigParamsMPKData, InOutNodesMPKData, PluginResourcesMPKData,
AwesomeNetMPKData, EV74ConfigParamsMPKData, APUConfigParamsMPKData, PluginInputNodeMPKData, TensorTypeMPKData,
ModelInputMPKData,
)
from afe.backends.ev.sima_ev_kernels import SIMA_EV_KERNELS
from afe.backends.mpk.node import get_node_size, flatten_tuple, get_output_node_names
from afe.backends.mpk.operator import make_ev_plugin_parameters
from afe.core.compile_networks import TessellateParameters, compile_net_to_elf, get_zip_file_path, compute_checksum
from afe.ir.attributes import TupleGetItemAttrs
from afe.ir.defines import Status, NodeName, TupleValue, TensorValue, DataValue, get_expected_tensor_value
from afe.apis._sanitize_errors import sanitize_afe_error
from afe.ir.net import AwesomeNet
from afe.ir.node import (
AwesomeNode, node_is_placeholder, node_is_sima_ir, node_is_backend_ir, node_is_tuple_get_item,
node_is_tuple
)
from sima_utils.logging import sima_logger
from afe.ir.tensor_type import TensorType, data_byte_size, tensor_byte_size
# Information about all MPK JSON nodes that have been produced by the algorithm.
# Each MPK JSON node represents a tensor that is computed when the network is executed.
InOutMPKDataDict = dict[NodeName, DataValue[InOutNodesMPKData]]
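# For example (hypothetical node names and sizes), a node that produces a single
# tensor maps to TensorValue(InOutNodesMPKData(name='conv1', size=12544)), while a
# tuple-producing node maps to a TupleValue of such entries.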
def _generate_json_tensor_type(tensor_type: TensorType) -> TensorTypeMPKData:
"""
    Convert a tensor type to a format that can be written to a JSON file.
Args:
tensor_type: Type to convert
Returns:
        Tensor type data that can be written to a JSON file.
"""
return TensorTypeMPKData(scalar=tensor_type.scalar.name, shape=list(tensor_type.shape))
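# For illustration (hypothetical scalar and shape): a TensorType whose scalar's
# .name is 'int8' and whose shape is (1, 224, 224, 3) converts to
# TensorTypeMPKData(scalar='int8', shape=[1, 224, 224, 3]).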
def generate_mla_plugin_mpk_data(node: AwesomeNode, input_nodes: list[PluginInputNodeMPKData],
sequence: int, stage: int, model_name: str,
desired_batch_size: int, actual_batch_size: int,
output_names: dict[str, str]) -> PluginMPKData:
"""
    Generate MPK JSON data for the MLA plugin.
Args:
node: AwesomeNode
        input_nodes: List of the node's inputs as MPK JSON data. May differ from
            node.input_node_names, as Tuple and TupleGetItem node outputs are redirected
            to their inputs and an Unpack node's output is split into multiple outputs.
sequence: Plugin's position in the model's execution sequence.
        stage: Stage number of the graph. Every graph has a unique stage number
            that represents its order in the AwesomeNet.
model_name: Name of the model. Used to create an .elf file name.
desired_batch_size: Batch size requested by user.
actual_batch_size: Input batch size value used by compiler in .elf file generation.
output_names: Dictionary mapping Model SDK names to original model names.
Returns:
        PluginMPKData instance.
"""
node_name = node.name
processor = "MLA"
config_params = MLAConfigParamsMPKData(desired_batch_size=desired_batch_size,
actual_batch_size=actual_batch_size)
node_type = node.get_type()
output_sizes = [int(data_byte_size(TensorValue(t))) for t in flatten_tuple(node_type.output)]
    out_node_names = flatten_tuple(get_output_node_names(node_name, node_type.output, output_names))
    output_nodes = [InOutNodesMPKData(name=name, size=size) for name, size in zip(out_node_names, output_sizes)]
elf_file_name = f"{model_name}_stage{stage}_mla.elf"
    # The resource path is recorded relative to the output directory, so it is just the filename.
resources = PluginResourcesMPKData(elf_file_name)
plugin_mpk_data = PluginMPKData(name=node_name,
sequence=sequence,
processor=processor,
config_params=config_params,
input_nodes=input_nodes,
output_nodes=output_nodes,
resources=resources)
return plugin_mpk_data
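# For example (hypothetical model name): with model_name='resnet50' and stage=2,
# the plugin's resource is the file 'resnet50_stage2_mla.elf'.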
def generate_apu_plugin_mpk_data(node: AwesomeNode, input_nodes: list[PluginInputNodeMPKData], sequence: int,
stage: int, model_name: str, output_names: dict[str, str]) -> PluginMPKData:
"""
Produce the MPK JSON data for executing an APU compiled object file using TVM's runtime.
Args:
node: Node to produce code for. It must be a BackendIR node using the APU backend.
        input_nodes: List of the node's inputs as MPK JSON data. May differ from
            node.input_node_names, as Tuple and TupleGetItem node outputs are redirected
            to their inputs and an Unpack node's output is split into multiple outputs.
sequence: Plugin's position in the model's execution sequence.
        stage: Stage number of the graph. Every graph has a unique stage number
            that represents its order in the AwesomeNet.
model_name: Name of the model. Used to create an .so file name.
output_names: Dictionary mapping Model SDK names to original model names.
Returns:
        PluginMPKData instance representing the APU plugin.
"""
assert isinstance(node.ir, BackendIR)
assert node.ir.backend == Backend.APU
assert isinstance(node.ir.graph, CompiledTVMObjectFile)
node_name = node.name
node_param_names = node.input_names
processor = "A65"
node_type = node.get_type()
input_types = [_generate_json_tensor_type(get_expected_tensor_value(x))
for x in node_type.inputs.values()]
output_types = [_generate_json_tensor_type(x) for x in flatten_tuple(node_type.output)]
output_sizes = [int(data_byte_size(TensorValue(t))) for t in flatten_tuple(node_type.output)]
    out_node_names = flatten_tuple(get_output_node_names(node_name, node_type.output, output_names))
    output_nodes = [InOutNodesMPKData(name=name, size=size) for name, size in zip(out_node_names, output_sizes)]
config_params = APUConfigParamsMPKData(input_names=node_param_names, input_types=input_types,
output_types=output_types)
object_file_name = f'{model_name}_stage{stage}_a65.so'
resources = PluginResourcesMPKData(object_file_name)
plugin_mpk_data = PluginMPKData(name=node_name,
sequence=sequence,
processor=processor,
config_params=config_params,
input_nodes=input_nodes,
output_nodes=output_nodes,
resources=resources)
return plugin_mpk_data
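# For example (hypothetical model name): with model_name='resnet50' and stage=3,
# the plugin's resource is the object file 'resnet50_stage3_a65.so', which is
# executed on the A65 using TVM's runtime.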
def generate_ev74_plugin_mpk_data(node: AwesomeNode, input_nodes: list[PluginInputNodeMPKData], sequence: int,
desired_batch_size: int, actual_batch_size: int,
output_names: dict[str, str]) -> PluginMPKData:
"""
Produce the MPK JSON data for executing an EV74 plugin.
Args:
node: AwesomeNode
input_nodes: List of AwesomeNode's inputs as they are in
the MPK JSON file, in the form of MPK JSON nodes. This list reflects
the way nodes were processed to eliminate tuples.
sequence: Plugin's position in the model's execution sequence.
desired_batch_size: Batch size requested by user.
actual_batch_size: Batch size used in code generation.
output_names: Dictionary mapping Model SDK names to original model names.
Returns:
        PluginMPKData instance.
"""
name = node.name
kernel_params = make_ev_plugin_parameters(node, input_nodes, desired_batch_size, actual_batch_size, output_names)
sima_kernel_name = SIMA_EV_KERNELS[type(node.ir.operation)]
resources = PluginResourcesMPKData(sima_kernel_name)
return PluginMPKData(name=name,
sequence=sequence,
processor=kernel_params.processor,
config_params=kernel_params.config_params,
input_nodes=kernel_params.input_nodes,
output_nodes=kernel_params.output_nodes,
resources=resources)
def generate_pass_through_plugin(input_nodes: list[InOutNodesMPKData], sequence: int,
desired_batch_size: int, actual_batch_size: int) -> PluginMPKData:
"""
    Generate MPK JSON data for the PassThrough plugin.
Args:
input_nodes: Input nodes of a plugin.
sequence: Plugin's position in the model's execution sequence.
desired_batch_size: Batch size requested by user.
actual_batch_size: Batch size used in code generation.
Returns:
        PluginMPKData instance.
"""
output_nodes = list()
for idx, input_node in enumerate(input_nodes):
size = input_node.size
output_nodes.append(InOutNodesMPKData(name=f'pass_through_out_{idx}', size=size))
return PluginMPKData(name="PassThrough",
sequence=sequence,
processor="EV74",
config_params=EV74ConfigParamsMPKData(desired_batch_size, actual_batch_size,
"pass_through", {}),
input_nodes=[PluginInputNodeMPKData(n.name, n.size) for n in input_nodes],
output_nodes=output_nodes,
resources=None)
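# For example, a network whose output is a two-element tuple yields a PassThrough
# plugin with output nodes named 'pass_through_out_0' and 'pass_through_out_1',
# each matching the size of the corresponding input.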
def _generate_inout_nodes_mpk_data(net: AwesomeNet, nodes: list[NodeName]) -> list[InOutNodesMPKData]:
"""
Generate MPK JSON data for input or output nodes.
Args:
net: AwesomeNet.
nodes: List of input or output nodes.
Returns:
        List of InOutNodesMPKData instances.
"""
in_out_nodes_mpk_data = list()
for node in nodes:
name = node
size = get_node_size(net.nodes[name])
in_out_nodes_mpk_data.append(InOutNodesMPKData(name=name, size=size))
return in_out_nodes_mpk_data
def generate_output_nodes_mpk_data(net: AwesomeNet,
output_nodes: list[NodeName] | None = None) -> list[InOutNodesMPKData]:
"""
Generate MPK JSON data for output nodes.
Args:
net: AwesomeNet.
        output_nodes: Optional list of output node names, used when the network output is a tuple node.
Returns:
        List of InOutNodesMPKData instances.
"""
output_node_names = output_nodes if output_nodes is not None else [net.output_node_name]
return _generate_inout_nodes_mpk_data(net, output_node_names)
def log_compilation_summary(data: AwesomeNetMPKData, desired_batch_size: int, filenames: list[str]) -> None:
"""
Print compilation summary.
Args:
data: AwesomeNetMPKData class.
desired_batch_size: Batch size requested by user.
filenames: List of file names generated after compilation.
Returns:
None
"""
actual_batch_size = data.get_actual_batch_size()
actual_batch_size = actual_batch_size if actual_batch_size is not None else desired_batch_size
plugin_distribution = data.get_plugins_backend_distribution()
sima_logger.sima_log_info(30 * '=')
sima_logger.sima_log_info('Compilation summary:')
sima_logger.sima_log_info(30 * '-')
sima_logger.sima_log_info(f'Desired batch size: {desired_batch_size}')
sima_logger.sima_log_info(f'Achieved batch size: {actual_batch_size}')
sima_logger.sima_log_info(30 * '-')
sima_logger.sima_log_info('Plugin distribution per backend:')
for plugin in ["MLA", "EV74", "A65"]:
sima_logger.sima_log_info(f'\t{plugin:4}: {plugin_distribution[plugin]}')
sima_logger.sima_log_info(30 * '-')
sima_logger.sima_log_info('Generated files: ' + ", ".join(filenames))
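# Illustrative output (hypothetical values):
#     ==============================
#     Compilation summary:
#     ------------------------------
#     Desired batch size: 1
#     Achieved batch size: 1
#     ------------------------------
#     Plugin distribution per backend:
#         MLA : 2
#         EV74: 3
#         A65 : 0
#     ------------------------------
#     Generated files: model_stage1_mla.elf, model_mpk.json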
def _make_output_mpk_data(base_name: str, output_type: DataValue[TensorType],
output_names: dict[str, str]) -> DataValue[InOutNodesMPKData]:
"""
Make the MPK JSON representation of a data value representing some computed output of a node.
Args:
base_name: Initial part of the node name to use in the MPK JSON file. If the
data is a tuple, something will be appended to make unique names.
        output_type: The node's output type.
        output_names: Dictionary mapping Model SDK names to original model names.
Returns:
DataValue holding the MPK JSON data identifying each output tensor.
"""
if isinstance(output_type, TensorValue):
out_node_name = get_output_node_names(base_name, output_type, output_names)
assert isinstance(out_node_name, TensorValue)
return TensorValue(InOutNodesMPKData(name=out_node_name.value, size=tensor_byte_size(output_type.value)))
else:
assert isinstance(output_type, TupleValue)
return TupleValue([_make_output_mpk_data(f"{base_name}_{i}", e, output_names)
for i, e in enumerate(output_type.elements)])
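# For example (hypothetical base name): a node 'concat' whose output type is a
# tuple of two tensors produces MPK JSON nodes named 'concat_0' and 'concat_1',
# unless get_output_node_names remaps them through output_names.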
def _node_output_mpk_data(node: AwesomeNode, node_values_dict: InOutMPKDataDict,
output_names: dict[str, str]) -> DataValue[InOutNodesMPKData]:
"""
Make the MPK JSON representation of a data value representing a node's outputs.
Args:
        node: Node to process.
        node_values_dict: MPK JSON data of previously processed nodes.
        output_names: Dictionary mapping Model SDK names to original model names.
Returns:
DataValue holding the MPK JSON data identifying each output tensor.
"""
if node_is_tuple(node):
# Construct a tuple value of the node's inputs
return TupleValue([node_values_dict[node_name] for node_name in node.input_node_names])
elif node_is_tuple_get_item(node):
# Select one item from the node's input, which must be a tuple
assert len(node.input_node_names) == 1
tuple_node_name = node.input_node_names[0]
tuple_input_nodes = node_values_dict[tuple_node_name]
assert isinstance(tuple_input_nodes, TupleValue)
attrs = node.ir.attrs
assert isinstance(attrs, TupleGetItemAttrs)
return tuple_input_nodes.elements[attrs.index]
else:
# The node will become an MPK JSON plugin.
# Create new node names for its outputs.
return _make_output_mpk_data(node.name, node.get_type().output, output_names)
def _update_nodes_dict(node: AwesomeNode, node_values_dict: InOutMPKDataDict, output_names: dict[str, str]):
"""
    Helper function for updating the node values dictionary.
    Before the update, the dictionary should contain only the values of the node's input nodes.
"""
assert node.name not in node_values_dict
node_values_dict[node.name] = _node_output_mpk_data(node, node_values_dict, output_names)
def _get_tuple_inputs(tuple_node: DataValue[InOutNodesMPKData]) -> list[InOutNodesMPKData]:
"""
Get inputs from a tuple node.
"""
assert isinstance(tuple_node, TupleValue)
return flatten_tuple(tuple_node)
def _get_node_input_values(node: AwesomeNode, node_values_dict: InOutMPKDataDict) -> list[PluginInputNodeMPKData]:
"""
    Get MPK JSON data representing the input nodes of an AwesomeNode. The list may differ
    from node.input_node_names, as Tuple and TupleGetItem node outputs are redirected
    to their inputs and an Unpack node's output is split into multiple outputs.
Args:
node: AwesomeNode
node_values_dict: Dictionary containing mapping from AwesomeNode
IDs to MPK JSON node IDs.
Returns:
List of MPK JSON node IDs which represent the input of the AwesomeNode being processed.
"""
node_names: list[PluginInputNodeMPKData] = list()
for input_node_name in node.input_node_names:
assert input_node_name in node_values_dict
node_name_data_value = node_values_dict[input_node_name]
value = get_expected_tensor_value(node_name_data_value)
node_names.append(PluginInputNodeMPKData(name=value.name, size=value.size))
return node_names
def _map_original_model_out_name_to_tgi_input(awesome_net: AwesomeNet, tgi_node: AwesomeNode,
output_names: dict[str, str], orig_name: str) -> dict[str, str]:
"""
    Helper function for mapping an original model output name to the appropriate input of a TupleGetItem node.
"""
node = awesome_net.nodes[tgi_node.input_node_names[0]]
tgi_attrs = tgi_node.ir.attrs
assert isinstance(tgi_attrs, TupleGetItemAttrs)
if node_is_tuple(node):
index = tgi_attrs.index
tuple_input = awesome_net.nodes[node.input_node_names[index]].name
output_names = _map_original_model_out_name_to_tuple_input(awesome_net, [tuple_input], [orig_name],
output_names)
else:
        node_has_multiple_outs = isinstance(node.get_type().output, TupleValue)
        # If the input node of the TupleGetItem node has multiple outputs, append an index
        # to the node name to identify exactly which of those outputs is passed to the
        # TupleGetItem node.
        node_name = node.name + f'_{tgi_attrs.index}' if node_has_multiple_outs else node.name
output_names[node_name] = orig_name
return output_names
def _map_original_model_out_name_to_tuple_input(awesome_net: AwesomeNet, tuple_inputs: list[NodeName],
output_labels: list, output_names: dict[str, str]) -> dict[str, str]:
"""
    Helper function for mapping original model output names to the appropriate tuple input node names.
"""
for idx, (node_name, orig_name) in enumerate(zip(tuple_inputs, output_labels)):
tuple_input_node = awesome_net.nodes[node_name]
if node_is_tuple_get_item(tuple_input_node):
output_names = _map_original_model_out_name_to_tgi_input(awesome_net, tuple_input_node, output_names,
orig_name)
else:
output_names[node_name] = orig_name
return output_names
def _map_original_model_node_name(awesome_net: AwesomeNet) -> dict[str, str]:
"""
    Map the original model's output names to the last AwesomeNet node name.
    If the last node is a tuple node, map output names to the inputs of the tuple node.
"""
output_names = {}
output_labels = awesome_net.output_labels
last_node = awesome_net.nodes[awesome_net.output_node_name]
if output_labels is not None:
if node_is_tuple(last_node):
tuple_inputs = last_node.input_node_names
assert len(output_labels) == len(tuple_inputs)
output_names = _map_original_model_out_name_to_tuple_input(awesome_net, tuple_inputs, output_labels,
output_names)
else:
assert len(output_labels) == 1
output_names[last_node.name] = output_labels[0]
return output_names
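# For example (hypothetical names): if the last node is a Tuple node with inputs
# ['conv1', 'conv2'] and output_labels is ['boxes', 'scores'], the result is
# {'conv1': 'boxes', 'conv2': 'scores'}. TupleGetItem inputs are first traced
# through to the nodes that actually produce the selected outputs.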
def generate_plugins_mpk_data(net: AwesomeNet,
desired_batch_size: int,
actual_batch_size: int) -> tuple[list[PluginMPKData], list[InOutNodesMPKData]]:
"""
    Generate MPK data for MLA, EV74, and APU plugins.
Args:
net: AwesomeNet.
desired_batch_size: Batch size requested by user.
actual_batch_size: Batch size used in code generation.
Returns:
        Tuple of the list of PluginMPKData instances and the list of the network's input node data.
"""
sequence = 1
plugins_mpk_data: list[PluginMPKData] = list()
node_values_dict: InOutMPKDataDict = {}
output_names = _map_original_model_node_name(net)
stage = 1
for n in net.execution_order:
node = net.nodes[n]
_update_nodes_dict(node, node_values_dict, output_names)
assert node.name in node_values_dict
if node_is_placeholder(node) or node_is_tuple(node) or node_is_tuple_get_item(node):
# Inputs of the graph are not included in the plugin list
# Tuple nodes are handled separately at the end
continue
input_nodes: list[PluginInputNodeMPKData] = _get_node_input_values(node, node_values_dict)
if node_is_sima_ir(node):
# All SiMaIR nodes (except the ones that were skipped) are translated to EV plugins
data = generate_ev74_plugin_mpk_data(node, input_nodes, sequence, desired_batch_size,
actual_batch_size, output_names)
plugins_mpk_data.append(data)
sequence += 1
elif node_is_backend_ir(node):
if node.ir.backend == Backend.MLA:
data = generate_mla_plugin_mpk_data(node, input_nodes, sequence, stage, net.name,
desired_batch_size, actual_batch_size, output_names)
stage += 1
elif node.ir.backend == Backend.APU:
data = generate_apu_plugin_mpk_data(node, input_nodes, sequence, stage, net.name, output_names)
stage += 1
else:
                raise NotImplementedError(f"MPK JSON output is not supported for backend {node.ir.backend}")
plugins_mpk_data.append(data)
sequence += 1
else:
            raise TypeError(f'Unsupported IR: {node.ir}')
    # When the output of a network is a tuple, a PassThrough plugin is generated whose
    # inputs are the elements of the tuple.
assert net.output_node_name in node_values_dict
output_node_value = node_values_dict[net.output_node_name]
if isinstance(output_node_value, TupleValue):
unpacked_output_nodes = _get_tuple_inputs(output_node_value)
data = generate_pass_through_plugin(unpacked_output_nodes, sequence, desired_batch_size, actual_batch_size)
plugins_mpk_data.append(data)
# Collect input node information that was populated from PlaceholderOp nodes
net_input_nodes = []
for name in net.input_node_names:
v = node_values_dict[name]
net_input_nodes.append(get_expected_tensor_value(v))
return plugins_mpk_data, net_input_nodes
def generate_mpk_json_data(net: AwesomeNet, elf_file_path: str, batch_size: int, compress: bool,
tessellate_parameters: TessellateParameters | None,
enable_large_tensors: bool = True,
l2_caching_mode: L2CachingMode = L2CachingMode.NONE,
mlc_files_path: str | None = None,
use_power_limits: bool = False,
max_mla_power: float | None = None,
layer_norm_use_fp32_intermediates: bool = False,
rms_norm_use_fp32_intermediates: bool = False) -> AwesomeNetMPKData:
"""
Generate MPK JSON data.
Args:
net: AwesomeNet.
elf_file_path: ELF file directory path.
batch_size: The batch size of the input to the model.
        compress: If True, the .mlc file is compressed before generating the .elf file.
tessellate_parameters: Dictionary defining the tessellation parameters
for inputs and outputs of the MLA segments.
        enable_large_tensors: If True, the MLA will handle large tensors; otherwise,
            large tensors will raise an exception.
        l2_caching_mode: Specifies the mode of L2 caching in the n2a compiler.
        mlc_files_path: Path for .mlc files. If provided, .mlc files will be saved.
        use_power_limits: If True, the MLA compiler will schedule instructions to conform to power limits.
        max_mla_power: Set to a positive float value to override the default maximum MLA power when power limits are used.
        layer_norm_use_fp32_intermediates: Use FP32 intermediate tensors in the BF16 LayerNorm kernel.
        rms_norm_use_fp32_intermediates: Use FP32 intermediate tensors in the BF16 RMSNorm kernel.
Returns:
        AwesomeNetMPKData instance.
"""
# Copy the input AwesomeNet as it's mutated in the processing.
net_mut = copy.deepcopy(net)
name = net_mut.name
compiler_batch_size, _ = compile_net_to_elf(
net_mut, elf_file_path, batch_size, compress, tessellate_parameters,
enable_large_tensors=enable_large_tensors, l2_caching_mode=l2_caching_mode,
mlc_files_path=mlc_files_path, do_pack=False, use_power_limits=use_power_limits,
max_power=max_mla_power,
layer_norm_use_fp32_intermediates=layer_norm_use_fp32_intermediates,
rms_norm_use_fp32_intermediates=rms_norm_use_fp32_intermediates
)
net_mut.set_batch_size(compiler_batch_size)
plugins, input_nodes = generate_plugins_mpk_data(net_mut, batch_size, compiler_batch_size)
    model_input_mpk_data = [
        ModelInputMPKData(name=input_node.name, size=input_node.size, type=input_node.type,
                          input_range=net.fp_input_range[input_node.name]
                          if net.fp_input_range is not None else None)
        for input_node in input_nodes
    ]
# Get original model path and compute checksum
model_path = net.model_path
model_checksum = compute_checksum(model_path) if model_path is not None else None
data = AwesomeNetMPKData(name=name,
model_path=net.model_path,
model_checksum=model_checksum,
input_nodes=model_input_mpk_data,
plugins=plugins)
return data
def generate_mpk_json_file(net: AwesomeNet, file_path: str, batch_size: int = 1, compress: bool = True,
tessellate_parameters: TessellateParameters | None = None,
enable_large_tensors: bool = True,
l2_caching_mode: L2CachingMode = L2CachingMode.NONE,
mlc_files_path: str | None = None,
use_power_limits: bool = False,
max_mla_power: float | None = None,
layer_norm_use_fp32_intermediates: bool = False,
rms_norm_use_fp32_intermediates: bool = False) -> None:
"""
Generate MPK JSON file.
Args:
net: AwesomeNet.
        file_path: Output directory path where the compiled model archive is written.
batch_size: The batch size of the input to the model.
        compress: If True, the .mlc file is compressed before generating the .elf file.
tessellate_parameters: Dictionary defining the tessellation parameters
for inputs and outputs of the MLA segments.
        enable_large_tensors: If True, the MLA will handle large tensors; otherwise,
            large tensors will raise an exception.
        l2_caching_mode: Specifies the mode of L2 caching in the n2a compiler.
        mlc_files_path: Path for .mlc files. If provided, .mlc files will be saved.
        use_power_limits: If True, the MLA compiler will schedule instructions to conform to power limits.
        max_mla_power: Set to a positive float value to override the default maximum MLA power when power limits are used.
        layer_norm_use_fp32_intermediates: Use FP32 intermediate tensors in the BF16 LayerNorm kernel.
        rms_norm_use_fp32_intermediates: Use FP32 intermediate tensors in the BF16 RMSNorm kernel.
Returns:
None
"""
if net.status != Status.SIMA_QUANTIZED and net.has_mla_nodes():
raise sima_logger.UserFacingException("Network must be quantized. Please run quantization first!")
with tempfile.TemporaryDirectory() as tmpdirname:
try:
data = generate_mpk_json_data(
net, tmpdirname, batch_size, compress, tessellate_parameters,
enable_large_tensors=enable_large_tensors, l2_caching_mode=l2_caching_mode,
mlc_files_path=mlc_files_path, use_power_limits=use_power_limits,
max_mla_power=max_mla_power,
layer_norm_use_fp32_intermediates=layer_norm_use_fp32_intermediates,
rms_norm_use_fp32_intermediates=rms_norm_use_fp32_intermediates
)
except Exception as e:
sanitize_afe_error("Failed to generate mpk json file", e)
        # Serialize the MPK data to JSON
json_object = json.dumps(dataclasses.asdict(data), indent=4)
json_file_path = os.path.join(tmpdirname, net.name + '_mpk.json')
with open(json_file_path, "w") as outfile:
outfile.write(json_object)
# Parse MPK JSON file
import mpk_parser
mpk_parser_path = os.path.dirname(mpk_parser.__file__)
script_path = os.path.join(mpk_parser_path, 'mpk2conf.py')
command = ['python3', script_path, f'-mpk={json_file_path}', f'-mpkdir={tmpdirname}']
result = subprocess.run(command)
if result.returncode != 0:
            sima_logger.sima_log_warning('Failed to parse mpk json file.')
        # Package the generated files from the temporary directory into a .tar.gz archive, including the .json file
zip_file_path = get_zip_file_path(file_path, net.name)
filenames = list()
with tarfile.open(zip_file_path, "w:gz") as f:
for file in pathlib.Path(tmpdirname).iterdir():
if file.suffix in ['.elf', '.so', '.yaml', '.json']:
filenames.append(file.name)
f.add(file, arcname=file.name)
try:
script_path = sys.argv[0]
if os.path.exists(script_path):
script_name = os.path.basename(script_path)
filenames.append(script_name)
arcname = 'archived_compile_script.' + script_name
f.add(script_path, arcname=arcname)
except Exception as e:
sima_logger.sima_log_warning(f'Failed to add script to tar.gz file: {str(e)}')
log_compilation_summary(data, batch_size, filenames)
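# Example usage (a minimal sketch; assumes `net` is an already quantized AwesomeNet
# and that the output directory exists):
#     generate_mpk_json_file(net, '/path/to/output_dir', batch_size=1)
# This writes a .tar.gz archive (named via get_zip_file_path) containing the
# generated .elf/.so files, the MPK JSON file, and any .yaml files produced by the
# mpk2conf.py parser step.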