265 changes: 201 additions & 64 deletions examples/fx_integerization/integerize_pactnets.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion quantlib
Submodule quantlib updated 34 files
+265 −52 algorithms/pact/pact_ops.py
+1 −2 backends/__init__.py
+0 −23 backends/abstract_net/__init__.py
+0 −183 backends/abstract_net/abstract_net.py
+14 −14 backends/cutie/cutie_export.py
+63 −16 backends/deeploy/pact_export.py
+2 −1 backends/dory/__init__.py
+58 −43 backends/dory/dory_passes.py
+213 −0 backends/dory/dvs_pact_export.py
+2 −172 backends/dory/pact_export.py
+0 −23 backends/twn_accelerator/__init__.py
+0 −578 backends/twn_accelerator/acl.py
+0 −226 backends/twn_accelerator/compiler_vgg.py
+0 −191 backends/twn_accelerator/debug.py
+0 −166 backends/twn_accelerator/gammabeta.py
+0 −399 backends/twn_accelerator/layers.py
+0 −96 backends/twn_accelerator/quantops.py
+0 −7 backends/twn_accelerator/source.json
+0 −107 backends/twn_accelerator/templates/acl_net.cpp.mako
+0 −44 backends/twn_accelerator/templates/acl_net.h.mako
+0 −41 backends/twn_accelerator/templates/get_layer.c.mako
+0 −17 backends/twn_accelerator/templates/get_layer.h.mako
+0 −33 backends/twn_accelerator/templates/get_net.c.mako
+0 −17 backends/twn_accelerator/templates/get_net.h.mako
+0 −35 backends/twn_accelerator/templates/twn_layer_defs.h.mako
+0 −270 backends/twn_accelerator/twn_accelerator.py
+0 −170 backends/twn_accelerator/weights.py
+29 −2 editing/fx/passes/eps.py
+53 −32 editing/fx/passes/pact/approximate.py
+95 −41 editing/fx/passes/pact/harmonize.py
+178 −93 editing/fx/passes/pact/integerize.py
+2 −0 editing/fx/passes/pact/pact_util.py
+7 −1 editing/fx/util/util.py
+1 −2 editing/lightweight/rules/filters.py
1 change: 1 addition & 0 deletions systems/CIFAR10/ResNet/quantize/__init__.py
@@ -1 +1,2 @@
from .pact import *
from .bb import *
52 changes: 37 additions & 15 deletions systems/CIFAR10/ResNet/quantize/pact.py
@@ -29,6 +29,7 @@
import quantlib.editing.lightweight.rules as qlr
from quantlib.editing.lightweight.rules.filters import VariadicOrFilter, NameFilter, TypeFilter
from quantlib.editing.fx.passes.pact import HarmonizePACTNetPass, PACT_symbolic_trace
from quantlib.editing.fx.util import module_of_node

from quantlib.algorithms.pact.pact_ops import *
from quantlib.algorithms.pact.pact_controllers import *
@@ -45,39 +46,49 @@ def pact_recipe(net : nn.Module,
# An additional dict is expected to be stored under the key "kwargs", which
# is used as the default kwargs.

filter_conv2d = TypeFilter(nn.Conv2d)
filter_linear = TypeFilter(nn.Linear)
act_types = (nn.ReLU, nn.ReLU6)
filter_acts = VariadicOrFilter(*[TypeFilter(t) for t in act_types])
uact_types = (nn.ReLU, nn.ReLU6)
sact_types = (nn.Hardtanh,)

rhos = []
conv_cfg = config["PACTConv2d"]
lin_cfg = config["PACTLinear"]
act_cfg = config["PACTUnsignedAct"]

harmonize_cfg = config["harmonize"]
uact_cfg = config["PACTUnsignedAct"]

try:
sact_cfg = config["PACTAsymmetricAct"]
except KeyError:
sact_cfg = {}

try:
last_add_8b = config['last_add_8b']
except KeyError:
last_add_8b = False

def make_rules(cfg : dict,
rule : type):
harmonize_cfg = config["harmonize"]

def make_rules(cfg : dict, t : tuple,
rule : type, **kwargs):
rules = []
default_cfg = cfg["kwargs"] if "kwargs" in cfg.keys() else {}
layer_keys = [k for k in cfg.keys() if k != "kwargs"]
type_filter = VariadicOrFilter(*[TypeFilter(tt) for tt in t])
print("type filter: ", type_filter)
for k in layer_keys:
filt = NameFilter(k)
kwargs = default_cfg.copy()
filt = NameFilter(k) & type_filter
kwargs.update(default_cfg)
kwargs.update(cfg[k])
rho = rule(filt, **kwargs)
rules.append(rho)
return rules

rhos += make_rules(conv_cfg,
rhos += make_rules(conv_cfg, (nn.Conv2d,),
qlr.pact.ReplaceConvLinearPACTRule)
rhos += make_rules(lin_cfg,
rhos += make_rules(lin_cfg, (nn.Linear,),
qlr.pact.ReplaceConvLinearPACTRule)
rhos += make_rules(act_cfg,
qlr.pact.ReplaceActPACTRule)
rhos += make_rules(uact_cfg, uact_types,
qlr.pact.ReplaceActPACTRule, signed=False)
rhos += make_rules(sact_cfg, sact_types,
qlr.pact.ReplaceActPACTRule, signed=True)

lwg = qlw.LightweightGraph(net)
lwe = qlw.LightweightEditor(lwg)
@@ -115,6 +126,17 @@ def make_rules(cfg : dict,
final_net = harmonize_pass(net_traced)


if last_add_8b:
for n in [nn for nn in final_net.graph.nodes][::-1]:

if n.op == 'call_module':
module = module_of_node(final_net, n)
if isinstance(module, PACTIntegerAdd):
outact_node = [k for k in n.users.keys()][0]
outact_module = module_of_node(final_net, outact_node)
print(f"Setting node {outact_node}'s output n_levels attribute to 256!")
outact_module.n_levels = 256

# the prec. spec file might include layers that were added by the
# harmonization pass; those need to be treated separately
final_nodes = LightweightGraph.build_nodes_list(final_net)
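For reference, a minimal sketch of a config dict that the updated pact_recipe could consume. The layer names ("pilot.0") and the n_levels values are illustrative placeholders, not taken from this PR; only the top-level keys mirror what the recipe actually reads ("kwargs" as per-type defaults, per-layer overrides, and the new optional "PACTAsymmetricAct" and "last_add_8b" entries):

config = {
    "PACTConv2d": {
        "kwargs": {"n_levels": 256},   # defaults applied to every matched Conv2d
        "pilot.0": {"n_levels": 16},   # hypothetical per-layer override
    },
    "PACTLinear": {"kwargs": {"n_levels": 256}},
    # ReLU/ReLU6 modules are replaced by unsigned PACT activations ...
    "PACTUnsignedAct": {"kwargs": {"n_levels": 256}},
    # ... while Hardtanh now maps to signed ones; this key may be omitted entirely
    "PACTAsymmetricAct": {"kwargs": {"n_levels": 256}},
    # optional: set the activation following each integer add to 256 levels (8 bits)
    "last_add_8b": True,
    "harmonize": {"n_levels": 256},
}

Note that name filters are now conjoined with a type filter (NameFilter(k) & type_filter), so a per-layer key such as "pilot.0" only takes effect if the named module also has the expected type.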
13 changes: 11 additions & 2 deletions systems/CIFAR10/ResNet/resnet.py
@@ -203,6 +203,16 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
'block_cfgs': [( 1, 16, 1),
( 1, 32, 2),
( 1, 64, 2)],
'maxpool': False},
'ResNet8_tb': {'block_class': BasicBlock,
'block_cfgs': [( 1, 20, 1),
( 1, 40, 2),
( 1, 80, 2)],
'maxpool': False},
'ResNet8_ts': {'block_class': BasicBlock,
'block_cfgs': [( 1, 15, 1),
( 1, 30, 2),
( 1, 60, 2)],
'maxpool': False},
'ResNet0': {'block_class': NonResidualBlock,
'block_cfgs': [( 1, 16, 2),
@@ -251,12 +261,11 @@ def __init__(self,
block_class = _CONFIGS[config]['block_class']
block_cfgs = _CONFIGS[config]['block_cfgs']
do_maxpool = _CONFIGS[config]['maxpool']

out_channels_pilot = 16
in_planes_features = out_channels_pilot
out_planes_features = block_cfgs[-1][1] * block_class.expansion_factor
out_channels_features = out_planes_features
self.act_type = nn.ReLU if activation.lower() == 'relu' else nn.ReLU6
self.act_type = nn.ReLU if activation.lower() == 'relu' else nn.ReLU6 if activation.lower() == 'relu6' else nn.Hardtanh

self.pilot = self._make_pilot(out_channels_pilot)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) if do_maxpool else None
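The two new configs and the extended activation switch can be exercised as follows; this is a sketch that assumes the class in this file is named ResNet and that config and activation are constructor arguments (both are referenced in __init__ above). Any activation string other than 'relu' or 'relu6' now falls through to nn.Hardtanh:

net_tb = ResNet(config='ResNet8_tb', activation='relu6')     # 20/40/80-channel blocks
net_ts = ResNet(config='ResNet8_ts', activation='hardtanh')  # 15/30/60-channel blocks, signed activation

The chained conditional works, but an equivalent lookup with a default would read more directly and scale to more activation types:

_ACTS = {'relu': nn.ReLU, 'relu6': nn.ReLU6}
self.act_type = _ACTS.get(activation.lower(), nn.Hardtanh)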
17 changes: 9 additions & 8 deletions systems/DVS128/dvs_cnn/dvs_cnn.py
@@ -12,6 +12,7 @@
__CNN_CFGS__ = {
'first_try' : [128, 128, 128, 128],
'ninetysix_ch' : [96, 96, 96, 96],
'eighty_ch' : [80, 80, 80, 80],
'reduced_channels' : [64, 64, 64, 64],
'128_channels' : [128, 128, 128, 128],
'96_channels' : [96, 96, 96, 96],
@@ -27,12 +28,13 @@
'64_channels' : [(2, 1, 64), (2, 2, 64), (2, 4, 64)],
'64_channels_k3' : [(3, 1, 64), (3, 2, 64), (3, 4, 64)],
'64_channels_k4' : [(4, 1, 64), (4, 2, 64), (4, 4, 64)],
'ninetysix_ch' : [(2, 1, 96), (2, 2, 96), (2, 4, 96)],
'eighty_ch' : [(2, 1, 80), (2, 2, 80), (2, 4, 80)],
'128_ch' : [(2, 1, 128), (2, 2, 128), (2, 4, 128)],
'128_channels' : [(2, 1, 128), (2, 2, 128), (2, 4, 128)],
'k3' : [(3, 1, 64), (3, 2, 64), (3, 4, 64)],
'96_channels' : [(2, 1, 96), (2, 2, 96), (2, 4, 96)],
'96_channels_k3' : [(3, 1, 96), (3, 2, 96), (3, 4, 96)],
'80_channels_k3' : [(3, 1, 80), (3, 2, 80), (3, 4, 80)],
'96_channels_k4' : [(4, 1, 96), (4, 2, 96), (4, 4, 96)],
'32_channels' : [(2, 1, 32), (2, 2, 32), (2, 4, 32)],
'32_channels_k3' : [(3, 1, 32), (3, 2, 32), (3, 4, 32)],
@@ -51,7 +53,7 @@ def get_input_shape(cfg : dict):

class DVSNet2D(nn.Module):
def __init__(self, cnn_cfg_key : str, pool_type : str = "stride", cnn_window : int = 16, activation : str = 'relu',
out_size : int = 11, use_classifier : bool = True, fix_cnn_pool=False, k : int = 3, layer_order : str = 'pool_bn', last_conv_nopad : bool = False, **kwargs):
out_size : int = 11, use_classifier : bool = True, fix_cnn_pool=False, k : int = 3, layer_order : str = 'pool_bn', last_conv_nopad : bool = False, adapter_out_ch : int = 32, **kwargs):
super(DVSNet2D, self).__init__()
cfg = __CNN_CFGS__[cnn_cfg_key]
self.k = k
@@ -69,22 +71,22 @@ def __init__(self, cnn_cfg_key : str, pool_type : str = "stride", cnn_window : int

adapter_list = []

adapter_list.append(nn.Conv2d(cnn_window, 32, kernel_size=k, padding=k//2, bias=False))
adapter_list.append(nn.Conv2d(cnn_window, adapter_out_ch, kernel_size=k, padding=k//2, bias=False))
if pool_type != 'max_pool':
adapter_pool = nn.AvgPool2d(kernel_size=2)
else:
adapter_pool = nn.MaxPool2d(kernel_size=2)
if layer_order == 'pool_bn':
adapter_list.append(adapter_pool)
adapter_list.append(nn.BatchNorm2d(32))
adapter_list.append(nn.BatchNorm2d(adapter_out_ch))
else:
adapter_list.append(nn.BatchNorm2d(32))
adapter_list.append(nn.BatchNorm2d(adapter_out_ch))
adapter_list.append(adapter_pool)
adapter_list.append(self._act(inplace=True))
adapter = nn.Sequential(*adapter_list)
self.adapter = adapter

features = self._get_features(32, cfg, k, pool_type, self._act, layer_order, last_conv_nopad)
features = self._get_features(adapter_out_ch, cfg, k, pool_type, self._act, layer_order, last_conv_nopad)
self.features = features
# after features block, we should have a 4x4 feature map
if use_classifier:
@@ -282,8 +284,7 @@ def forward(self, x):
# 1. split it up into cnn_window-sized stacks

if self.inject_eps:
#x = QTensor(x, eps=1.)
pass
x = QTensor(x, eps=1.)
#print(f"type of x - hybridnet: {type(x)}")
cnn_wins = torch.split(x, self.cnn_window, dim=1)
#print(f"eps of x - hybridnet after split: {tuple(w.eps for w in cnn_wins)}")
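Two behavioral changes in this file are worth spelling out: the adapter's output width, previously hard-coded to 32 channels, now follows the new adapter_out_ch argument, and the eps-injection branch in forward() again wraps the input in QTensor(x, eps=1.) instead of passing it through. A usage sketch with illustrative values (arguments not visible in this diff are assumed to keep their defaults):

# narrow 32-channel adapter feeding the 80-channel backbone
net = DVSNet2D('eighty_ch', cnn_window=16, activation='relu6', adapter_out_ch=32)
# adapter width matched to the backbone
net_wide = DVSNet2D('eighty_ch', adapter_out_ch=80)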
15 changes: 11 additions & 4 deletions systems/ILSVRC12/MobileNetV2/quantize/pact.py
@@ -73,8 +73,12 @@ def pact_recipe(net : nn.Module,
conv_cfg = config["PACTConv2d"]
lin_cfg = config["PACTLinear"]
act_cfg = config["PACTUnsignedAct"]

harmonize_cfg = config["harmonize"]
# we may get a config that does not include a harmonization configuration;
# in that case, simply don't harmonize
try:
harmonize_cfg = config["harmonize"]
except KeyError:
harmonize_cfg = None


prec_override_spec = {}
@@ -124,8 +128,11 @@ def make_rules(cfg : dict,
lwe.shutdown()

# now harmonize the graph according to the configuration
harmonize_pass = HarmonizePACTNetPass(**harmonize_cfg)
final_net = harmonize_pass(net)
if harmonize_cfg is not None:
harmonize_pass = HarmonizePACTNetPass(**harmonize_cfg)
final_net = harmonize_pass(net)
else:
final_net = net

# the prec. spec file might include layers that were added by the
# harmonization pass; those need to be treated separately
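The try/except makes harmonization opt-in without breaking existing configs that lack the "harmonize" key. dict.get expresses the same logic more compactly; a sketch of an equivalent (not the committed code):

def maybe_harmonize(net, config):
    # None when the "harmonize" key is absent -> skip the pass
    harmonize_cfg = config.get("harmonize")
    if harmonize_cfg is None:
        return net
    return HarmonizePACTNetPass(**harmonize_cfg)(net)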
15 changes: 10 additions & 5 deletions systems/ILSVRC12/ResNet/quantize/pact.py
@@ -51,7 +51,10 @@ def pact_recipe(net : nn.Module,
lin_cfg = config["PACTLinear"]
act_cfg = config["PACTUnsignedAct"]

harmonize_cfg = config["harmonize"]
try:
harmonize_cfg = config["harmonize"]
except KeyError:
harmonize_cfg = None

def make_rules(cfg : dict,
rule : type):
@@ -82,10 +85,12 @@ def make_rules(cfg : dict,
lwe.apply()
lwe.shutdown()
# now harmonize the graph
harmonize_pass = HarmonizePACTNetPass(**harmonize_cfg)
#harmonize_pass = HarmonizePACTNetPass(n_levels=harmonize_cfg["n_levels"])
net_traced = PACT_symbolic_trace(lwg.net)
final_net = harmonize_pass(net_traced)
if harmonize_cfg is not None:
harmonize_pass = HarmonizePACTNetPass(**harmonize_cfg)
net_traced = PACT_symbolic_trace(lwg.net)
final_net = harmonize_pass(net_traced)
else:
final_net = lwg.net

return final_net

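Note one asymmetry in this variant: PACT_symbolic_trace is only applied inside the harmonize branch, so callers get back an fx-traced GraphModule when a "harmonize" config is present and the untraced lwg.net when it is not. If downstream consumers always expect a traced graph, tracing unconditionally would remove the asymmetry; a sketch:

net_traced = PACT_symbolic_trace(lwg.net)
if harmonize_cfg is not None:
    final_net = HarmonizePACTNetPass(**harmonize_cfg)(net_traced)
else:
    final_net = net_traced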