1. Training

# -*- coding: utf-8 -*-
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt

from keras import __version__
from keras.applications.inception_v3 import InceptionV3, preprocess_input
#from keras.applications.inception_v3_matt import InceptionV3, preprocess_input

from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD

def get_nb_files(directory):
  """Get number of files by searching directory recursively"""
  if not os.path.exists(directory):
    return 0
  cnt = 0
  for r, dirs, files in os.walk(directory):
    for dr in dirs:
      cnt += len(glob.glob(os.path.join(r, dr + "/*")))
  return cnt
#train_num = get_nb_files('/home/pandafish/AnacondaProjects/Inceptionv3/dataset_my/train')  2500
#print(train_num)
#input('wait...')

# Data preparation
IM_WIDTH, IM_HEIGHT = 299, 299 # input image size expected by InceptionV3
FC_SIZE = 1024                 # number of nodes in the new fully connected layer
##NB_IV3_LAYERS_TO_FREEZE = 172  # number of layers to freeze
NB_IV3_LAYERS_TO_FREEZE = 0  # number of layers to freeze


train_dir = '/home/dl/local_repo/data/mosaic1/mosaic_train'  # training set
val_dir = '/home/dl/local_repo/data/mosaic1/mosaic_valid'    # validation set
output_model_file = '/home/dl/local_repo/data/mosaic1/mosaic.hdf5'
weights_path = '/home/dl/local_repo/data/mosaic1/mosaic.h5'


nb_classes= 2
nb_epoch = 10
batch_size = 32

nb_train_samples = get_nb_files(train_dir)      # number of training samples
nb_classes = len(glob.glob(train_dir + "/*"))   # number of classes
nb_val_samples = get_nb_files(val_dir)          # number of validation samples
nb_epoch = int(nb_epoch)                        # number of epochs
batch_size = int(batch_size)

# Image data generators
train_datagen =  ImageDataGenerator(
  preprocessing_function=preprocess_input,
  ##rotation_range=30,
  width_shift_range=0.2,
  height_shift_range=0.2,
  shear_range=0.2,
  zoom_range=0.2,
  horizontal_flip=True
)
test_datagen = ImageDataGenerator(
  preprocessing_function=preprocess_input,
  ##rotation_range=30,
  width_shift_range=0.2,
  height_shift_range=0.2,
  shear_range=0.2,
  zoom_range=0.2,
  horizontal_flip=True
)
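# Note: the validation generator above applies the same random augmentation as the
# training generator. That is unusual; a plain, preprocessing-only generator is the
# more common choice for validation (a sketch, not the author's setting):
# test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)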

# Training and validation data
train_generator = train_datagen.flow_from_directory(
  train_dir,
  target_size=(IM_WIDTH, IM_HEIGHT),
  batch_size=batch_size,
  class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
  val_dir,
  target_size=(IM_WIDTH, IM_HEIGHT),
  batch_size=batch_size,
  class_mode='categorical')
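# flow_from_directory assigns class indices in alphabetical order of the class
# sub-directories; printing the mapping helps keep the label order used in the
# test script (step 2) consistent (assuming the directories are named after the
# labels, e.g. mosaic/ and normal/):
# print(train_generator.class_indices)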

# Add new top layers
def add_new_last_layer(base_model, nb_classes):
  """Add the final classification layers.
  Args:
    base_model: the base model without its top layers
    nb_classes: number of classes
  Returns:
    a new Keras model with the new top layers
  """
  x = base_model.output
  x = GlobalAveragePooling2D()(x)
  x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init
  predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer
  model = Model(inputs=base_model.input, outputs=predictions)
  return model
# Freeze the layers below NB_IV3_LAYERS_TO_FREEZE
def setup_to_finetune(model):
  """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.

  note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch

  Args:
    model: keras model
  """
  for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
     layer.trainable = False
  for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
     layer.trainable = True
  model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])

# Set up the network
model = InceptionV3(weights='imagenet', include_top=False)
model = add_new_last_layer(model, nb_classes)
setup_to_finetune(model)
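# For comparison, a pure transfer-learning pass (often called "mode 1" in this
# family of tutorials; the name is an assumption since it is not shown here)
# would freeze the whole base model and train only the new top layers:
# def setup_to_transfer_learn(model, base_model):
#   for layer in base_model.layers:
#     layer.trainable = False
#   model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])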

# Mode 2: fine-tune training
history_ft = model.fit_generator(
  train_generator,
  samples_per_epoch=nb_train_samples,
  nb_epoch=nb_epoch,
  validation_data=validation_generator,
  nb_val_samples=nb_val_samples,
  class_weight=None)  # 'auto1' is not a valid value; pass a dict of class weights if the classes are imbalanced
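# On Keras 2 the generator arguments above are the legacy (Keras 1) names; a
# rough equivalent with the current API, assuming steps are derived from the
# sample counts, would be:
# history_ft = model.fit_generator(
#   train_generator,
#   steps_per_epoch=nb_train_samples // batch_size,
#   epochs=nb_epoch,
#   validation_data=validation_generator,
#   validation_steps=nb_val_samples // batch_size)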

# Save the model
model.save(output_model_file)
model.save_weights(weights_path)
# Plot the training curves
def plot_training(history):
  acc = history.history['acc']
  val_acc = history.history['val_acc']
  loss = history.history['loss']
  val_loss = history.history['val_loss']
  epochs = range(len(acc))
  plt.plot(epochs, acc, 'r.')
  plt.plot(epochs, val_acc, 'r')
  plt.title('Training and validation accuracy')
  plt.figure()
  plt.plot(epochs, loss, 'r.')
  plt.plot(epochs, val_loss, 'r-')
  plt.title('Training and validation loss')
  plt.show()

# Plot training accuracy/loss
plot_training(history_ft)
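
As noted in step 2, the server has no display, so plt.show() is of little use there. A minimal sketch that writes the curves to image files instead (it relies only on matplotlib's standard Agg backend and savefig; the file names are illustrative):

import matplotlib
matplotlib.use('Agg')  # non-interactive backend, must be selected before pyplot is imported
import matplotlib.pyplot as plt

def plot_training_to_files(history, prefix='training'):
  acc, val_acc = history.history['acc'], history.history['val_acc']
  loss, val_loss = history.history['loss'], history.history['val_loss']
  epochs = range(len(acc))
  plt.figure()
  plt.plot(epochs, acc, 'r.', epochs, val_acc, 'r')
  plt.title('Training and validation accuracy')
  plt.savefig(prefix + '_acc.png')   # write to disk instead of opening a window
  plt.figure()
  plt.plot(epochs, loss, 'r.', epochs, val_loss, 'r-')
  plt.title('Training and validation loss')
  plt.savefig(prefix + '_loss.png')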

2. Testing images on the server

# -*- coding: utf-8 -*-
# Server-side inference script
import sys
import argparse
import numpy as np
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt

from keras.preprocessing import image
from keras.models import load_model
from keras.applications.inception_v3 import preprocess_input

# Image size required by InceptionV3
target_size = (299, 299) #fixed size for InceptionV3 architecture

# Prediction function
# Input: model, image, target size
# Output: prediction vector
def predict(model, img, target_size):
  """Run model prediction on image
  Args:
    model: keras model
    img: PIL format image
    target_size: (w,h) tuple
  Returns:
    list of predicted labels and their probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)
  return preds[0]

# Plotting function
# Plot the image together with the prediction; change the labels below as needed

labels = ("mosaic", "normal")
def plot_preds(image, preds,labels):
  """Displays image and the top-n predicted probabilities in a bar graph
  Args:
    image: PIL image
    preds: list of predicted labels and their probabilities
  """
  plt.imshow(image)
  plt.axis('off')
  plt.figure()
  plt.barh([0, 1], preds, alpha=0.5)
  plt.yticks([0, 1], labels)
  plt.xlabel('Probability')
  plt.xlim(0,1.01)
  plt.tight_layout()
  plt.show()

# Load the model saved in step 1
model = load_model('/home/dl/local_repo/data/mosaic1/mosaic.hdf5')

# Local test image
img = Image.open('test.jpg')
preds = predict(model, img, target_size)
print(preds)
##plot_preds(img, preds, labels)  # no display attached to the server, so skip plotting
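
To turn the two probabilities into a class name, the highest-scoring index can be looked up in the labels tuple (this assumes the tuple order matches the alphabetical class order used by flow_from_directory in step 1):

print(labels[int(np.argmax(preds))])  # e.g. 'mosaic' or 'normal'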

3. Converting hdf5 to pb

# -*- coding: utf-8 -*-
from keras.models import load_model
import tensorflow as tf
import os 
import os.path as osp
from keras import backend as K
# Path parameters
# input_path = 'input path'
# weight_file = 'weight.h5'
# weight_file_path = osp.join(input_path,weight_file)

weight_file = 'mosaic.hdf5'
weight_file_path = '/home/dl/local_repo/data/mosaic1/mosaic.hdf5'

output_graph_name = osp.splitext(weight_file)[0] + '.pb'  # -> 'mosaic.pb'

# Conversion function
def h5_to_pb(h5_model,output_dir,model_name,out_prefix = "output_",log_tensorboard = True):
    if not osp.exists(output_dir):
        os.mkdir(output_dir)
    out_nodes = []
    for i in range(len(h5_model.outputs)):
        out_nodes.append(out_prefix + str(i + 1))
        tf.identity(h5_model.outputs[i], name=out_prefix + str(i + 1))
    sess = K.get_session()
    from tensorflow.python.framework import graph_util,graph_io
    init_graph = sess.graph.as_graph_def()
    main_graph = graph_util.convert_variables_to_constants(sess,init_graph,out_nodes)
    graph_io.write_graph(main_graph,output_dir,name = model_name,as_text = False)
    if log_tensorboard:
        from tensorflow.python.tools import import_pb_to_tensorboard
        import_pb_to_tensorboard.import_to_tensorboard(osp.join(output_dir,model_name),output_dir)
# Output path
output_dir = osp.join(os.getcwd(),"trans_model")
# Load the Keras model
h5_model = load_model(weight_file_path)
h5_to_pb(h5_model,output_dir = output_dir,model_name = output_graph_name)
print('model saved')
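
Before wiring the .pb file into OpenCV, it can help to confirm the node names that were actually written (the path below assumes the default trans_model output directory and the mosaic.pb name produced above; TF 1.x API):

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('trans_model/mosaic.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
# list the graph inputs and the renamed outputs (output_1, ...) created by h5_to_pb
print([n.name for n in graph_def.node if n.op == 'Placeholder' or n.name.startswith('output_')])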

4. OpenCV test (reference: opencv-4.0.1/modules/dnn/test/test_tf_importer.cpp)
TEST(Test_TensorFlow, read_inception)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat sample = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!sample.empty());
    Mat input;
    resize(sample, input, Size(299, 299));
    input -= 128; // mean sub

    Mat inputBlob = blobFromImage(input);

    net.setInput(inputBlob, "input");
    Mat out = net.forward("softmax2");

    std::cout << out.dims << std::endl;
}
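
The OpenCV test above targets the stock Inception graph; for the model exported in step 3 the preprocessing has to match Keras' preprocess_input, which maps pixels to [-1, 1]. A minimal Python sketch (file names are the ones used earlier; the output comes from the output_1 identity added by the conversion script):

import cv2

net = cv2.dnn.readNetFromTensorflow('trans_model/mosaic.pb')
img = cv2.imread('test.jpg')
# blobFromImage subtracts the mean and then multiplies by scalefactor, so
# (x - 127.5) / 127.5 reproduces Keras' InceptionV3 preprocessing; swapRB
# converts OpenCV's BGR order to the RGB order the model was trained on.
blob = cv2.dnn.blobFromImage(img, scalefactor=1.0 / 127.5, size=(299, 299),
                             mean=(127.5, 127.5, 127.5), swapRB=True, crop=False)
net.setInput(blob)
out = net.forward()
print(out.flatten())  # two softmax scores, in the class order from step 1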

Errors and solutions
1. readNetFromTensorflow crashes.

This is caused by an out-of-bounds access in the FusedBatchNorm importer: the node carries only 3 blobs, so blobs[3] cannot be read.
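
A workaround that is commonly reported for this class of error (an assumption here, since the exact fix is not shown): set the Keras learning phase to 0 before loading the model in the step-3 conversion script, so the frozen graph only contains inference-mode batch-norm nodes:

from keras import backend as K
from keras.models import load_model

K.set_learning_phase(0)                    # force inference mode before the graph is built
h5_model = load_model(weight_file_path)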

2. libnvinfer.so.5 cannot be found

Download the matching version of TensorRT from https://developer.nvidia.com/nvidia-tensorrt-5x-download

tar -xzvf ***.tar.gz

cd TensorRT*/lib

cp libnvinfer.so.5.0.2 /usr/lib/x86_64-linux-gnu/
ln -s /usr/lib/x86_64-linux-gnu/libnvinfer.so.5.0.2 /usr/lib/x86_64-linux-gnu/libnvinfer.so.5