Commit: some update

david8862 committed Dec 8, 2022
1 parent 72c0b64 commit 5943583
Showing 11 changed files with 241 additions and 57 deletions.
16 changes: 16 additions & 0 deletions configs/aarch64-linux-gnu.cmake
@@ -0,0 +1,16 @@
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR aarch64)
set(CMAKE_LIBRARY_ARCHITECTURE aarch64-linux-gnu)

set(PROJECT_ROOT_DIR "${CMAKE_CURRENT_LIST_DIR}/../..")

set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc)
set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++)

set(CMAKE_SYSROOT ${PROJECT_ROOT_DIR}/toolchain/aarch64-linux-gnu/${CMAKE_LIBRARY_ARCHITECTURE}/sysroot)
set(CMAKE_FIND_ROOT_PATH ${PROJECT_ROOT_DIR}/toolchain/aarch64-linux-gnu/${CMAKE_LIBRARY_ARCHITECTURE}/sysroot)

set(CMAKE_CROSSCOMPILING true)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
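
For reference, a minimal sketch of how a toolchain file like this is typically consumed (the path and build layout below are assumptions, not part of this commit; the file itself expects the cross toolchain and its sysroot under <project root>/toolchain/aarch64-linux-gnu/, per the CMAKE_SYSROOT setting above):

# mkdir build && cd build
# cmake -DCMAKE_TOOLCHAIN_FILE=/path/to/configs/aarch64-linux-gnu.cmake ..
# make -j4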
9 changes: 5 additions & 4 deletions inference/MNN/yoloDetection.cpp
@@ -4,13 +4,9 @@
//
// Created by Xiaobin Zhang on 2019/09/20.
//

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include "MNN/ImageProcess.hpp"
#include "MNN/Interpreter.hpp"
#define MNN_OPEN_TIME_TRACE
#include <algorithm>
#include <fstream>
#include <functional>
@@ -24,8 +20,13 @@
#include <getopt.h>
#include <string.h>
#include <sys/time.h>

+#define MNN_OPEN_TIME_TRACE
+#include "MNN/ImageProcess.hpp"
+#include "MNN/Interpreter.hpp"
#include "MNN/AutoTime.hpp"
#include "MNN/ErrorCode.hpp"

#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_RESIZE_IMPLEMENTATION
2 changes: 1 addition & 1 deletion inference/README.md
@@ -26,7 +26,7 @@ Refer to [MNN build guide](https://www.yuque.com/mnn/cn/build_linux), we need to
# ./tools/script/get_model.sh # optional
# mkdir build && cd build
# cmake [-DCMAKE_TOOLCHAIN_FILE=<cross-compile toolchain file>]
-       [-DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=ON -DMNN_BUILD_BENCHMARK=ON -DMNN_BUILD_TRAIN=ON -MNN_BUILD_TRAIN_MINI=ON -MNN_USE_OPENCV=OFF] ..
+       [-DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=ON -DMNN_BUILD_BENCHMARK=ON -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_TRAIN_MINI=ON -DMNN_USE_OPENCV=OFF] ..
&& make -j4
### MNN OpenCL backend build
165 changes: 165 additions & 0 deletions tools/evaluation/convkernel_check.py
@@ -0,0 +1,165 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
generate activation pattern images for the convolution kernels of specified
layers with a trained YOLO model

Reference:
    https://blog.csdn.net/qq_37781464/article/details/122946523
'''
import os, sys, argparse
import numpy as np

from tensorflow.keras.models import Model, load_model
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D, SeparableConv2D
import tensorflow.keras.backend as K

# compatible with TF 2.x
if tf.__version__.startswith('2'):
    import tensorflow.compat.v1 as tf
    from tensorflow.compat.v1.keras import backend as K
    tf.disable_eager_execution()

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
from common.utils import get_custom_objects, optimize_tf_gpu

optimize_tf_gpu(tf, K)


# convert tensor to visible image
def deprocess_image(x):
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    # x *= 255
    # x = np.clip(x, 0, 255)
    # x /= 255.
    return x


def get_layer_type(layer):
    # check the more specific types first, since DepthwiseConv2D and
    # SeparableConv2D may inherit from Conv2D in some Keras versions
    if isinstance(layer, DepthwiseConv2D):
        return 'DepthwiseConv2D'
    elif isinstance(layer, SeparableConv2D):
        return 'SeparableConv2D'
    elif isinstance(layer, Conv2D):
        return 'Conv2D'
    else:
        return 'Others'


# generate a visualization image for a conv kernel by maximizing its
# activation on a random input image, via gradient ascent on a loss function
def generate_pattern(model, layer_name, kernel_index, model_input_shape):
    height, width = model_input_shape
    # get the conv kernel output of the layer, and
    # use its mean value as loss
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, kernel_index])
    # get gradients of the loss w.r.t. the model input
    grads = K.gradients(loss, model.input)[0]
    # normalize the gradients
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    # function to fetch loss and gradients for an input image
    iterate = K.function([model.input], [loss, grads])

    # random gray input image with small noise
    input_img_data = np.random.random((1, height, width, 3)) * 20 + 128.

    # run iteration_num gradient ascent steps to build up the pattern
    step = 1.
    iteration_num = 40
    for i in range(iteration_num):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    img = input_img_data[0]

    return deprocess_image(img)


def convkernel_check(model_path, model_input_shape, layer_names, kernel_num, output_path):
    # load model
    custom_object_dict = get_custom_objects()
    model = load_model(model_path, compile=False, custom_objects=custom_object_dict)
    K.set_learning_phase(0)
    model.summary()

    height, width = model_input_shape

    os.makedirs(output_path, exist_ok=True)
    for layer_name in layer_names:
        layer = model.get_layer(layer_name)
        if get_layer_type(layer) == 'Others':
            print('layer {} is not a convolution type, bypass it'.format(layer_name))
            continue

        layer_channel = layer.output.shape[-1]
        if layer_channel < kernel_num:
            print('layer {} does not have {} conv kernels, only pick {}'.format(layer_name, kernel_num, layer_channel))
            layer_kernel_num = layer_channel
        else:
            layer_kernel_num = kernel_num

        print('Creating pattern for layer {}...'.format(layer_name))

        # prepare kernel pattern display grid array
        column_num = 16  # 16 kernel pattern columns per row
        margin = 5

        row_num = layer_kernel_num // column_num
        display_grid = np.zeros((height*row_num + margin*(row_num-1), width*column_num + margin*(column_num-1), 3))

        # fill one grid cell with a kernel pattern
        for row in range(row_num):
            for col in range(column_num):
                pattern_img = generate_pattern(model, layer_name, row*column_num+col, model_input_shape)
                horizontal_start = col*width + col*margin
                horizontal_end = horizontal_start + width
                vertical_start = row*height + row*margin
                vertical_end = vertical_start + height
                display_grid[vertical_start:vertical_end, horizontal_start:horizontal_end, :] = pattern_img

        # adjust display size for grid image
        row_scale = 1. / height
        col_scale = 1. / width
        plt.figure(figsize=(col_scale * display_grid.shape[1],
                            row_scale * display_grid.shape[0] + 1.8))

        plt.title('Conv kernel of layer '+layer_name)
        plt.imshow(display_grid)

        # save kernel pattern image & show it
        pattern_file = os.path.join(output_path, layer_name+'.jpg')
        plt.savefig(pattern_file, dpi=75)
        #plt.show()
        print('Kernel pattern of layer {} has been saved'.format(layer_name))



def main():
    parser = argparse.ArgumentParser(description='check kernel patterns of specified conv layers for a trained YOLO model')
    parser.add_argument('--model_path', type=str, required=True, help='model file to predict')
    parser.add_argument('--model_input_shape', help='model image input shape as <height>x<width>, default=%(default)s', type=str, default='416x416')
    parser.add_argument('--layer_names', type=str, required=True, help='layer names to check conv kernels, separate with comma if more than one')
    parser.add_argument('--kernel_num', help='conv kernel number to check, default=%(default)s', type=int, default=64)
    parser.add_argument('--output_path', type=str, required=True, help='output kernel pattern file directory')

    args = parser.parse_args()

    height, width = args.model_input_shape.split('x')
    model_input_shape = (int(height), int(width))
    assert (model_input_shape[0]%32 == 0 and model_input_shape[1]%32 == 0), 'model_input_shape should be multiples of 32'

    layer_names = args.layer_names.split(',')

    convkernel_check(args.model_path, model_input_shape, layer_names, args.kernel_num, args.output_path)


if __name__ == "__main__":
    main()
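
A hypothetical invocation of the new tool (the model file and layer names are placeholders; real layer names can be taken from the model.summary() printout):

# python convkernel_check.py --model_path=model.h5 --model_input_shape=416x416 \
#        --layer_names=conv2d_1,conv2d_3 --kernel_num=64 --output_path=output

Note this is slow by design: each kernel pattern runs iteration_num (40) forward/backward passes, so the default kernel_num=64 costs 2560 passes per layer.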
90 changes: 46 additions & 44 deletions tools/evaluation/featuremap_check.py
@@ -10,7 +10,6 @@
import os, sys, argparse
import glob
import numpy as np
-import cv2
from PIL import Image
from tensorflow.keras.models import Model, load_model
import matplotlib.pyplot as plt
@@ -31,75 +30,76 @@



-def generate_featuremap(image_path, model_path, model_input_shape, layer_name, featuremap_path):
+def generate_featuremap(image_path, model_path, model_input_shape, layer_names, featuremap_path):
    # load model
    custom_object_dict = get_custom_objects()
    model = load_model(model_path, compile=False, custom_objects=custom_object_dict)
    K.set_learning_phase(0)
    model.summary()

    # create featuremap model
-    featuremap_output = model.get_layer(layer_name).output
-    featuremap_model = Model(inputs=model.input, outputs=featuremap_output)
+    featuremap_outputs = [model.get_layer(layer_name).output for layer_name in layer_names]
+    featuremap_model = Model(inputs=model.input, outputs=featuremap_outputs)

+    os.makedirs(featuremap_path, exist_ok=True)
    # get image file list or single image
    if os.path.isdir(image_path):
        jpeg_files = glob.glob(os.path.join(image_path, '*.jpeg'))
        jpg_files = glob.glob(os.path.join(image_path, '*.jpg'))
        image_list = jpeg_files + jpg_files

-        #assert os.path.isdir(featuremap_path), 'need to provide a path for output featuremap'
-        os.makedirs(featuremap_path, exist_ok=True)
-        featuremap_list = [os.path.join(featuremap_path, os.path.splitext(os.path.basename(image_name))[0]+'.jpg') for image_name in image_list]
    else:
        image_list = [image_path]
-        featuremap_list = [featuremap_path]

    # loop the sample list to generate all featuremaps
-    for i, (image_file, featuremap_file) in enumerate(zip(image_list, featuremap_list)):
+    for i, image_file in enumerate(image_list):
        # process input
        img = Image.open(image_file).convert('RGB')
        image = np.array(img, dtype='uint8')
        image_data = preprocess_image(img, model_input_shape)

-        # get featuremap output
-        featuremap = featuremap_model.predict([image_data])
+        # get featuremap outputs
+        featuremaps = featuremap_model.predict([image_data])
+        if isinstance(featuremaps, np.ndarray):
+            featuremaps = [featuremaps]

-        # prepare featuremap display grid array
-        images_per_row = 16
-        feature_height, feature_width, feature_num = featuremap.shape[1:]
-        column_num = feature_num // images_per_row
-        display_grid = np.zeros((feature_height*column_num, feature_width*images_per_row))
+        for layer_name, featuremap in zip(layer_names, featuremaps):
+            # prepare featuremap display grid array
+            column_num = 16  # 16 feature map columns per row
+            feature_height, feature_width, feature_num = featuremap.shape[1:]
+            row_num = feature_num // column_num
+            display_grid = np.zeros((feature_height*row_num, feature_width*column_num))

-        # fill one grid with a featuremap
-        for col in range(column_num):
-            for row in range(images_per_row):
-                channel_image = featuremap[0, :, :, col * images_per_row + row]
+            # fill one grid with a featuremap
+            for row in range(row_num):
+                for col in range(column_num):
+                    channel_image = featuremap[0, :, :, row * column_num + col]

-                # rescale featuremap to more visible value for display
-                channel_image -= channel_image.mean()
-                channel_image /= channel_image.std()
-                channel_image *= 64
-                channel_image += 128
-                channel_image = np.clip(channel_image, 0, 255).astype('uint8')
+                    # rescale featuremap to more visible value for display
+                    channel_image -= channel_image.mean()
+                    channel_image /= channel_image.std()
+                    channel_image *= 64
+                    channel_image += 128
+                    channel_image = np.clip(channel_image, 0, 255).astype('uint8')

-                display_grid[col * feature_height : (col + 1) * feature_height,
-                             row * feature_width : (row + 1) * feature_width] = channel_image
+                    display_grid[row * feature_height : (row + 1) * feature_height,
+                                 col * feature_width : (col + 1) * feature_width] = channel_image

-        # adjust display size for grid image
-        col_scale = 1. / feature_height
-        row_scale = 1. / feature_width
-        plt.figure(figsize=(row_scale * display_grid.shape[1],
-                            col_scale * display_grid.shape[0]))
+            # adjust display size for grid image
+            row_scale = 1. / feature_height
+            col_scale = 1. / feature_width
+            plt.figure(figsize=(col_scale * display_grid.shape[1],
+                                row_scale * display_grid.shape[0] + 1.8))

-        plt.title('Feature map of layer '+layer_name+'\nHeight:'+str(feature_height)+', Width:'+str(feature_width)+', Channel:'+str(feature_num))
-        plt.grid(False)
-        plt.imshow(display_grid, aspect='auto', cmap='viridis')
+            plt.title('Feature map of layer '+layer_name+'\nHeight:'+str(feature_height)+', Width:'+str(feature_width)+', Channel:'+str(feature_num))
+            plt.grid(False)
+            plt.imshow(display_grid, cmap='viridis')

-        # save chart image & show it
-        plt.savefig(featuremap_file, dpi=75)
-        #plt.show()
-        print('Feature map of layer {} for image {} has been saved'.format(layer_name, image_file))
+            # save feature map & show it
+            featuremap_file = os.path.join(featuremap_path, os.path.splitext(os.path.basename(image_file))[0]+'_'+layer_name+'.jpg')
+            plt.savefig(featuremap_file, dpi=75)
+            #plt.show()
+            plt.clf()
+            print('Feature map of layer {} for image {} has been saved'.format(layer_name, image_file))



@@ -108,16 +108,18 @@ def main():
    parser.add_argument('--model_path', type=str, required=True, help='model file to predict')
    parser.add_argument('--image_path', type=str, required=True, help='image file or directory for input')
    parser.add_argument('--model_input_shape', help='model image input shape as <height>x<width>, default=%(default)s', type=str, default='416x416')
-    parser.add_argument('--layer_name', type=str, required=True, help='layer name to check feature map')
-    parser.add_argument('--featuremap_path', type=str, required=True, help='output featuremap file or directory')
+    parser.add_argument('--layer_names', type=str, required=True, help='layer names to check feature map, separate with comma if more than one')
+    parser.add_argument('--featuremap_path', type=str, required=True, help='output featuremap file directory')

    args = parser.parse_args()

    height, width = args.model_input_shape.split('x')
    model_input_shape = (int(height), int(width))
    assert (model_input_shape[0]%32 == 0 and model_input_shape[1]%32 == 0), 'model_input_shape should be multiples of 32'

-    generate_featuremap(args.image_path, args.model_path, model_input_shape, args.layer_name, args.featuremap_path)
+    layer_names = args.layer_names.split(',')
+
+    generate_featuremap(args.image_path, args.model_path, model_input_shape, layer_names, args.featuremap_path)


if __name__ == "__main__":
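
A matching hypothetical invocation of the updated script (file names are placeholders); with the new comma-separated --layer_names flag, each input image now yields one <image>_<layer>.jpg per requested layer under --featuremap_path:

# python featuremap_check.py --model_path=model.h5 --image_path=test.jpg \
#        --model_input_shape=416x416 --layer_names=conv2d_1,conv2d_3 --featuremap_path=output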
2 changes: 1 addition & 1 deletion tools/model_converter/custom_tflite_convert.py
@@ -219,7 +219,7 @@ def _convert_tf2_model(flags):
    converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir)
  elif flags.keras_model_file:
    custom_object_dict = get_custom_objects()
-   model = keras.models.load_model(flags.keras_model_file, custom_objects = custom_object_dict)
+   model = keras.models.load_model(flags.keras_model_file, compile=False, custom_objects=custom_object_dict)
    converter = lite.TFLiteConverterV2.from_keras_model(model)

  # Convert the model.
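
The added compile=False matters for models trained with custom losses or metrics: it skips restoring the training configuration, so only the custom layers in custom_object_dict need to be resolvable at load time. A hypothetical invocation of this code path (flag names inferred from the flags object above; other options may apply):

# python custom_tflite_convert.py --keras_model_file=model.h5 --output_file=model.tflite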