This post builds on the blog post linked below; the code has been reorganized and a few errors fixed for Keras 2.0 to produce a simple face-recognition CNN for a small-sample setting.
http://blog.csdn.net/u012162613/article/details/43277187
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 09:56:29 2017

@author: xiaoxue
"""

from __future__ import print_function
import numpy
import matplotlib.pyplot as plt
numpy.random.seed(1337)  # for reproducibility

from PIL import Image

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils

# There are 40 different classes
nb_classes = 40
nb_epoch = 40
batch_size = 40

# input image dimensions
img_rows, img_cols = 57, 47
# number of convolutional filters to use
nb_filters1, nb_filters2 = 5, 10
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3

def load_data(dataset_path):
    # olivettifaces.gif is a single image holding 400 faces in a 20x20 grid,
    # each face being 57x47 pixels
    img = Image.open(dataset_path)
    img_ndarray = numpy.asarray(img, dtype='float64') / 256
    # 400 pictures, size: 57*47 = 2679
    faces = numpy.empty((400, 2679))
    for row in range(20):
        for column in range(20):
            faces[row * 20 + column] = numpy.ndarray.flatten(
                img_ndarray[row * 57:(row + 1) * 57, column * 47:(column + 1) * 47])

    label = numpy.empty(400)
    for i in range(40):
        label[i * 10:i * 10 + 10] = i
    label = label.astype(int)

    # train: 320, valid: 40, test: 40
    train_data = numpy.empty((320, 2679))
    train_label = numpy.empty(320)
    valid_data = numpy.empty((40, 2679))
    valid_label = numpy.empty(40)
    test_data = numpy.empty((40, 2679))
    test_label = numpy.empty(40)

    # per subject: 8 images for training, 1 for validation, 1 for testing
    for i in range(40):
        train_data[i * 8:i * 8 + 8] = faces[i * 10:i * 10 + 8]
        train_label[i * 8:i * 8 + 8] = label[i * 10:i * 10 + 8]
        valid_data[i] = faces[i * 10 + 8]
        valid_label[i] = label[i * 10 + 8]
        test_data[i] = faces[i * 10 + 9]
        test_label[i] = label[i * 10 + 9]

    rval = [(train_data, train_label), (valid_data, valid_label),
            (test_data, test_label)]
    return rval

def Net_model(lr=0.005, decay=1e-6, momentum=0.9):
    model = Sequential()
    model.add(Conv2D(nb_filters1, (nb_conv, nb_conv),
                     input_shape=(img_rows, img_cols, 1),
                     padding='same'))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Conv2D(nb_filters2, kernel_size=(nb_conv, nb_conv)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    # model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1000))  # fully connected layer
    model.add(Activation('tanh'))
    # model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=["acc"])

    return model

def train_model(model, X_train, Y_train, X_val, Y_val):
    result = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
                       verbose=2, validation_data=(X_val, Y_val))
    model.save_weights('model_weights.h5', overwrite=True)
    return model, result

def test_model(model, X, Y):
    model.load_weights('model_weights.h5')
    score = model.evaluate(X, Y, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    return score

if __name__ == '__main__':
    # the data, split between train, validation and test sets
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = load_data('olivettifaces.gif')

    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_val.shape[0], 'validate samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_val = np_utils.to_categorical(y_val, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = Net_model()
    model, result = train_model(model, X_train, Y_train, X_val, Y_val)
    # score = test_model(model, X_test, Y_test)  # evaluate the saved weights on the test data
    # model.load_weights('model_weights.h5')
    classes = model.predict_classes(X_test, verbose=0)  # predict classes for the test data
    test_accuracy = numpy.mean(numpy.equal(y_test, classes))
    print("accuracy:", test_accuracy)

    plt.figure()
    plt.plot(result.epoch, result.history['acc'], label='acc')
    plt.plot(result.epoch, result.history['val_acc'], label='val_acc')
    plt.scatter(result.epoch, result.history['acc'], marker='*')
    plt.scatter(result.epoch, result.history['val_acc'])
    plt.legend(loc='best')
    plt.show()

    plt.figure()
    plt.plot(result.epoch, result.history['loss'], label="loss")
    plt.plot(result.epoch, result.history['val_loss'], label="val_loss")
    plt.scatter(result.epoch, result.history['loss'], marker='*')
    plt.scatter(result.epoch, result.history['val_loss'], marker='*')
    plt.legend(loc='best')
    plt.show()
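Before training, it can help to confirm how the layer shapes work out for the 57x47x1 input: 57x47x5 after the first 'same' convolution, 28x23x5 after pooling, 26x21x10 after the second ('valid') convolution, 13x10x10 = 1300 features after the last pooling and Flatten, then Dense(1000) and Dense(40). A quick check, assuming the script above has been run or imported so that Net_model is in scope:

    model = Net_model()
    model.summary()  # prints each layer's output shape and parameter count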
Running the script produces the following console output:
X_train shape: (320, 57, 47, 1)
320 train samples
40 validate samples
40 test samples
Train on 320 samples, validate on 40 samples
Epoch 1/40
0s - loss: 3.7571 - acc: 0.0156 - val_loss: 3.6561 - val_acc: 0.0500
Epoch 2/40
0s - loss: 3.6847 - acc: 0.0469 - val_loss: 3.6116 - val_acc: 0.1000
Epoch 3/40
0s - loss: 3.6023 - acc: 0.1063 - val_loss: 3.5312 - val_acc: 0.1500
Epoch 4/40
0s - loss: 3.4942 - acc: 0.1625 - val_loss: 3.4197 - val_acc: 0.2500
Epoch 5/40
0s - loss: 3.3360 - acc: 0.3312 - val_loss: 3.2416 - val_acc: 0.4750
Epoch 6/40
0s - loss: 3.0926 - acc: 0.5125 - val_loss: 2.9375 - val_acc: 0.5750
Epoch 7/40
0s - loss: 2.6818 - acc: 0.6219 - val_loss: 2.4856 - val_acc: 0.7250
Epoch 8/40
0s - loss: 2.1218 - acc: 0.8000 - val_loss: 1.9517 - val_acc: 0.8500
Epoch 9/40
0s - loss: 1.5356 - acc: 0.8969 - val_loss: 1.4674 - val_acc: 0.8750
Epoch 10/40
0s - loss: 1.0489 - acc: 0.9438 - val_loss: 1.1259 - val_acc: 0.9250
Epoch 11/40
0s - loss: 0.7106 - acc: 0.9688 - val_loss: 0.8722 - val_acc: 0.9500
Epoch 12/40
0s - loss: 0.4951 - acc: 0.9781 - val_loss: 0.7287 - val_acc: 0.9250
Epoch 13/40
0s - loss: 0.3659 - acc: 0.9875 - val_loss: 0.6243 - val_acc: 0.9500
Epoch 14/40
0s - loss: 0.2785 - acc: 0.9969 - val_loss: 0.5288 - val_acc: 0.9500
Epoch 15/40
0s - loss: 0.2220 - acc: 0.9938 - val_loss: 0.4754 - val_acc: 0.9500
Epoch 16/40
0s - loss: 0.1808 - acc: 1.0000 - val_loss: 0.4412 - val_acc: 0.9500
Epoch 17/40
0s - loss: 0.1520 - acc: 1.0000 - val_loss: 0.4031 - val_acc: 0.9500
Epoch 18/40
0s - loss: 0.1310 - acc: 1.0000 - val_loss: 0.3762 - val_acc: 0.9750
Epoch 19/40
0s - loss: 0.1157 - acc: 1.0000 - val_loss: 0.3489 - val_acc: 0.9750
Epoch 20/40
0s - loss: 0.1027 - acc: 1.0000 - val_loss: 0.3313 - val_acc: 0.9750
Epoch 21/40
0s - loss: 0.0925 - acc: 1.0000 - val_loss: 0.3206 - val_acc: 0.9750
Epoch 22/40
0s - loss: 0.0838 - acc: 1.0000 - val_loss: 0.3087 - val_acc: 0.9750
Epoch 23/40
0s - loss: 0.0765 - acc: 1.0000 - val_loss: 0.2952 - val_acc: 0.9750
Epoch 24/40
0s - loss: 0.0709 - acc: 1.0000 - val_loss: 0.2895 - val_acc: 0.9750
Epoch 25/40
0s - loss: 0.0657 - acc: 1.0000 - val_loss: 0.2829 - val_acc: 0.9750
Epoch 26/40
0s - loss: 0.0611 - acc: 1.0000 - val_loss: 0.2695 - val_acc: 0.9750
Epoch 27/40
0s - loss: 0.0571 - acc: 1.0000 - val_loss: 0.2639 - val_acc: 0.9750
Epoch 28/40
0s - loss: 0.0538 - acc: 1.0000 - val_loss: 0.2591 - val_acc: 0.9750
Epoch 29/40
0s - loss: 0.0506 - acc: 1.0000 - val_loss: 0.2544 - val_acc: 0.9750
Epoch 30/40
0s - loss: 0.0481 - acc: 1.0000 - val_loss: 0.2503 - val_acc: 0.9750
Epoch 31/40
0s - loss: 0.0452 - acc: 1.0000 - val_loss: 0.2444 - val_acc: 0.9750
Epoch 32/40
0s - loss: 0.0430 - acc: 1.0000 - val_loss: 0.2392 - val_acc: 0.9750
Epoch 33/40
0s - loss: 0.0410 - acc: 1.0000 - val_loss: 0.2368 - val_acc: 0.9750
Epoch 34/40
0s - loss: 0.0393 - acc: 1.0000 - val_loss: 0.2329 - val_acc: 0.9750
Epoch 35/40
0s - loss: 0.0376 - acc: 1.0000 - val_loss: 0.2293 - val_acc: 0.9750
Epoch 36/40
0s - loss: 0.0359 - acc: 1.0000 - val_loss: 0.2274 - val_acc: 0.9750
Epoch 37/40
0s - loss: 0.0345 - acc: 1.0000 - val_loss: 0.2251 - val_acc: 0.9750
Epoch 38/40
0s - loss: 0.0331 - acc: 1.0000 - val_loss: 0.2223 - val_acc: 0.9750
Epoch 39/40
0s - loss: 0.0320 - acc: 1.0000 - val_loss: 0.2185 - val_acc: 0.9750
Epoch 40/40
0s - loss: 0.0308 - acc: 1.0000 - val_loss: 0.2173 - val_acc: 0.9750
accuracy: 1.0
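Validation accuracy reaches 97.5% and all 40 test images are classified correctly. Since train_model saves the weights to model_weights.h5, the trained network can also be reused later without retraining. The snippet below is a minimal sketch, not part of the original post: it assumes Net_model from the script above is importable, and predict_one_face / face_index are names introduced here purely for illustration.

    import numpy
    from PIL import Image

    def predict_one_face(face_index, dataset_path='olivettifaces.gif'):
        # Crop one 57x47 face out of the 20x20 grid, exactly as load_data does.
        img = numpy.asarray(Image.open(dataset_path), dtype='float64') / 256
        row, column = divmod(face_index, 20)
        face = img[row * 57:(row + 1) * 57, column * 47:(column + 1) * 47]
        x = face.reshape(1, 57, 47, 1)          # batch of one, channels-last

        # Rebuild the same architecture and load the weights saved by train_model.
        model = Net_model()
        model.load_weights('model_weights.h5')
        probs = model.predict(x, verbose=0)     # shape (1, 40)
        return int(numpy.argmax(probs, axis=1)[0])

    # Faces 0-9 belong to subject 0, faces 10-19 to subject 1, and so on,
    # so predict_one_face(9) should return 0 if the model generalizes well.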