21 changes: 19 additions & 2 deletions deephub/detection_model/necks/second_fpn.py
@@ -4,7 +4,7 @@
from mmcv.cnn import build_conv_layer, build_norm_layer, build_upsample_layer
from mmcv.runner import BaseModule, auto_fp16
from torch import nn as nn

from torch.quantization import QuantStub, DeQuantStub


class SECONDFPN(BaseModule):
@@ -37,6 +37,8 @@ def __init__(self,
self.in_channels = in_channels
self.out_channels = out_channels
self.fp16_enabled = False
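# stubs marking where activations cross between float and int8 in
# eager-mode static quantization; they become observers after
# torch.quantization.prepare and real (de)quantize ops after convert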
self.quant = QuantStub()
self.dequant = DeQuantStub()

deblocks = []
for i, out_channel in enumerate(out_channels):
@@ -80,7 +82,22 @@ def forward(self, x):
list[torch.Tensor]: Multi-level feature maps.
"""
assert len(x) == len(self.in_channels)
ups = [deblock(x[i]) for i, deblock in enumerate(self.deblocks)]
# x = map(self.quant, x)
# x = list(x)

ups = []
for i, deblock in enumerate(self.deblocks):
    temp = x[i]
    # apply the deblock layer by layer so the quant stub can be inserted
    # in front of the norm layer (j == 1); every other layer, including
    # the final ReLU, runs unchanged
    for j, item in enumerate(deblock):
        if j == 1:
            temp = item(self.quant(temp))
        else:
            temp = item(temp)
    ups.append(temp)

# ups = [deblock(x[i]) for i, deblock in enumerate(self.deblocks)]

if len(ups) > 1:
out = torch.cat(ups, dim=1)
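
Note: the QuantStub/DeQuantStub pairs threaded through this PR follow PyTorch's eager-mode static quantization recipe. For reference, a minimal self-contained sketch of that recipe on a toy module (hypothetical, not part of this repo):

```python
import torch
from torch.quantization import QuantStub, DeQuantStub

class TinyBlock(torch.nn.Module):
    """Toy stand-in for a deblock: conv -> ReLU inside quant stubs."""

    def __init__(self):
        super().__init__()
        self.quant = QuantStub()        # float -> int8 boundary
        self.conv = torch.nn.Conv2d(4, 4, 3, padding=1)
        self.relu = torch.nn.ReLU()
        self.dequant = DeQuantStub()    # int8 -> float boundary

    def forward(self, x):
        x = self.quant(x)
        x = self.relu(self.conv(x))
        return self.dequant(x)

m = TinyBlock().eval()
m.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(m, inplace=True)
m(torch.randn(1, 4, 8, 8))                   # one calibration pass
torch.quantization.convert(m, inplace=True)  # int8 weights + kernels
```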
19 changes: 18 additions & 1 deletion deephub/detection_model/pointpillars.py
@@ -5,6 +5,8 @@
from .backbones import SECOND
from .necks import SECONDFPN
from .heads import Anchor3DHead
import numpy as np
from torch.quantization import QuantStub, DeQuantStub

class Pointpillars(BaseModule):
"""Backbone network for SECOND/PointPillars/PartA2/MVXNet.
@@ -43,6 +45,8 @@ def __init__(self,
in_channels=384,
feat_channels=384
)
self.quant = QuantStub()
self.dequant = DeQuantStub()

def forward(self,
voxels,
@@ -59,8 +63,14 @@
Returns:
List: Result of model.
"""
voxels = self.quant(voxels)
# num_points = self.quant(num_points)
# coors = self.quant(coors)
x = self.extract_feat(voxels, num_points, coors)
bbox_preds, scores, dir_scores = self.bbox_head(x)
# bbox_preds = self.dequant(bbox_preds)
# scores = self.dequant(scores)
# dir_scores = self.dequant(dir_scores)
return bbox_preds, scores, dir_scores

def extract_feat(self,
@@ -79,9 +89,16 @@ def extract_feat(self,
"""
voxel_features = self.voxel_encoder(voxels, num_points, coors)
batch_size = coors[-1, 0] + 1 # refactor
# assert batch_size == 1
assert batch_size == 1
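# the scatter ops in the middle encoder have no int8 kernels, so run it
# in float and re-quantize its output for the conv backbone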
voxel_features = self.dequant(voxel_features)
x = self.middle_encoder(voxel_features, coors, batch_size)
x = self.quant(x)
x = self.backbone(x)

x = map(self.dequant, x)
x = list(x)
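# backbone outputs return to float here; SECONDFPN re-quantizes
# internally in front of each norm layer (see second_fpn.py above)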

# print(size(x))
x = self.neck(x)
return x

16 changes: 15 additions & 1 deletion deephub/detection_model/voxel_encoders/pillar_encoder.py
@@ -4,7 +4,7 @@
from torch import nn

from .utils import PFNLayer, get_paddings_indicator

from torch.quantization import QuantStub, DeQuantStub

class PillarFeatureNet(nn.Module):
"""Pillar Feature Net.
@@ -85,6 +85,8 @@ def __init__(self,
self.y_offset = self.vy / 2 + point_cloud_range[1]
self.z_offset = self.vz / 2 + point_cloud_range[2]
self.point_cloud_range = point_cloud_range
self.quant = QuantStub()
self.dequant = DeQuantStub()

@force_fp32(out_fp16=True)
def forward(self, features, num_points, coors):
@@ -100,12 +102,14 @@
Returns:
torch.Tensor: Features of pillars.
"""
features = self.dequant(features)
features_ls = [features]
# Find distance of x, y, and z from cluster center
if self._with_cluster_center:
points_mean = features[:, :, :3].sum(
dim=1, keepdim=True) / num_points.type_as(features).view(-1, 1, 1)
f_cluster = features[:, :, :3] - points_mean
# f_cluster = self.quant(f_cluster)
features_ls.append(f_cluster)

# Find distance of x, y, and z from pillar center
@@ -131,7 +135,16 @@
features_ls.append(points_dist)

# Combine together feature decorations
# features_ls = self.dequant(features_ls)
# for i1 in features_ls:
# for i2 in i1:
# for i3 in i2:
# i3 = self.dequant(i3)

features = torch.cat(features_ls, dim=-1)
# features = self.quant(features)
# features = [self.quant(i) for i in features]

# The feature decorations were calculated without regard to whether
# pillar was empty. Need to ensure that
# empty pillars remain set to zeros.
@@ -142,5 +155,6 @@
for pfn in self.pfn_layers:
features = pfn(features, num_points)

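# hand the encoded pillar features back in the quantized domain;
# Pointpillars.extract_feat dequantizes them before the scatter step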
features = self.quant(features)
return features.squeeze(1)

10 changes: 7 additions & 3 deletions deephub/detection_model/voxel_encoders/utils.py
@@ -4,7 +4,7 @@
from mmcv.runner import auto_fp16
from torch import nn
from torch.nn import functional as F

from torch.quantization import QuantStub, DeQuantStub


def get_paddings_indicator(actual_num, max_num, axis=0):
@@ -67,6 +67,8 @@ def __init__(self,

assert mode in ['max', 'avg']
self.mode = mode
self.quant = QuantStub()
self.dequant = DeQuantStub()

@auto_fp16(apply_to=('inputs'), out_fp32=True)
def forward(self, inputs, num_voxels=None, aligned_distance=None):
@@ -84,11 +86,13 @@ def forward(self, inputs, num_voxels=None, aligned_distance=None):
Returns:
torch.Tensor: Features of Pillars.
"""
x = self.linear(inputs)
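# run only the linear layer in int8; the norm and ReLU below stay float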
x = self.quant(inputs)
x = self.linear(x)
x = self.dequant(x)
x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2,
1).contiguous()
x = F.relu(x)

# x = self.dequant(x)
if self.mode == 'max':
if aligned_distance is not None:
x = x.mul(aligned_distance.unsqueeze(-1))
155 changes: 155 additions & 0 deletions demo.py
@@ -0,0 +1,155 @@
import os
import sys
import torch

from deephub.detection_model import Pointpillars, Centerpoint
from mmcv.runner import load_checkpoint
from model.model_deployor.deployor_utils import create_input
from nni.compression.pytorch.pruning import L1NormPruner
from model.model_deployor.deployor import deploy
from nni.compression.pytorch.speedup import ModelSpeedup
from model.model_compressor.compressor import *

import time
import faulthandler
faulthandler.enable()
import numpy as np
import copy
import torch.nn.utils.prune as prune


def main():
start = time.time()
model = Pointpillars()

load_checkpoint(model, 'checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth', map_location='cpu')
model.cpu()

# for name, parameters in model.named_parameters():
# print(name, ':', parameters.dtype)

model.eval()
# print(model)
# print('--------------------------------------------------------')
input_names = ['voxels', 'num_points', 'coors']
output_names = ['scores', 'bbox_preds', 'dir_scores']
dynamic_axes = {'voxels': {0: 'voxels_num'},
'num_points': {0: 'voxels_num'},
'coors': {0: 'voxels_num'}}
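# the number of voxels varies per point cloud, so dim 0 of each input is
# marked dynamic for the ONNX export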
# dynamic_axes = None

# #0 NNI pruning
# ----------------------------------------
# config_list = [{
# 'sparsity_per_layer': 0.5,
# 'op_types': ['Linear', 'Conv2d']
# }]

# pruner = L1NormPruner(model, config_list)

# _, masks = pruner.compress()

# for name, mask in masks.items():
# print(name, ' sparsity : ', '{:.2}'.format(mask['weight'].sum() / mask['weight'].numel()))

# pruner._unwrap_model()

# from nni.compression.pytorch.speedup import ModelSpeedup
# ----------------------------------------


pcd = 'test/test_model_ops/data/kitti/kitti_000008.bin'
checkpoint = 'checkpoints/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car_20220331_134606-d42d15ed.pth'
dataset = 'kitti'
model_name = 'pointpillars'
device = 'cpu'
backend = 'onnxruntime'
output = 'pointpillars'
fp16 = False

data, model_inputs = create_input(pcd, dataset, model_name, device)

backend_file = deploy(model, model_inputs, input_names, output_names, dynamic_axes,
backend=backend, output_file=output, fp16=fp16, dataset=dataset)
print("SIZESIZESIZE : ", np.array(model_inputs[0].cpu()).shape)
print("SIZESIZESIZE : ", np.array(model_inputs[1].cpu()).shape)
print("SIZESIZESIZE : ", np.array(model_inputs[2].cpu()).shape)


# #0 NNI pruning
#----------------------------------------
# ModelSpeedup(model, [model_inputs[0], model_inputs[1].short(), model_inputs[2].short()], masks).speedup_model()
#----------------------------------------

# #1 dynamic quant (torch)
#----------------------------------------
# model.cpu()
# torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
# model.cuda()
# dynamic_quant(model)
#----------------------------------------

# # #2 static quant (torch)
# #----------------------------------------
# # print(model)
input_data = [model_inputs[0], model_inputs[1], model_inputs[2]]

static_quant(model, input_data)
# model.qconfig = torch.quantization.get_default_qconfig('fbgemm')

# model_prepared = torch.quantization.prepare(model)

# model_prepared(model_inputs[0], model_inputs[1], model_inputs[2])
# # model_prepared.cpu()


# model_prepared.cpu()
# model_int8 = torch.quantization.convert(model_prepared, inplace=True)

# # print(model_int8)
# print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# # model_int8.cpu()
# # model_int8.cuda()
# # data, model_inputs_cuda = create_input(pcd, dataset, model_name, 'cuda:0')
# torch_out = model_int8(model_inputs[0], model_inputs[1], model_inputs[2])
#----------------------------------------

# for n,p in model.named_parameters():
# print(n)
# print(p)

# torch_prune
#----------------------------------------
# prune_list = [torch.nn.Conv2d, torch.nn.Linear]
# amount_list = [0.3, 0.9]

# torch_prune(model, prune_list, amount_list)
#----------------------------------------

# for n,p in model.named_parameters():
# print(n)
# print(p)


# torch_out = model(model_inputs[0], model_inputs[1], model_inputs[2])


# if backend == 'onnxruntime':
# import onnxruntime

# ort_session = onnxruntime.InferenceSession(backend_file)

# input_dict = {}
# input_dict['voxels'] = model_inputs[0].cpu().numpy()
# input_dict['num_points'] = model_inputs[1].cpu().numpy()
# input_dict['coors'] = model_inputs[2].cpu().numpy()
# ort_output = ort_session.run(['scores', 'bbox_preds', 'dir_scores'], input_dict)

# outputs = {}
# outputs['scores'] = torch.tensor(ort_output[0])
# outputs['bbox_preds'] = torch.tensor(ort_output[1])
# outputs['dir_scores'] = torch.tensor(ort_output[2])

# print('onnx : inference successful!')

print(time.time() - start)

if __name__ == '__main__':
main()
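
Note: the static_quant helper imported from model.model_compressor.compressor is not part of this diff. Judging from the commented-out lines above, it presumably wraps the standard prepare/calibrate/convert flow; a minimal sketch under that assumption (the real signature may differ):

```python
import torch

def static_quant(model, input_data):
    """Post-training static quantization, eager mode (sketch only).

    Assumes `model` already contains QuantStub/DeQuantStub pairs, as
    added in this PR, and uses `input_data` once for calibration.
    """
    model.eval().cpu()
    model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    prepared = torch.quantization.prepare(model)
    prepared(*input_data)  # calibration pass populates the observers
    return torch.quantization.convert(prepared, inplace=True)
```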