This walkthrough assumes Caffe has already been built, including pycaffe.
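
A quick way to confirm pycaffe is importable (assuming $CAFFE_ROOT/python has been added to PYTHONPATH):

import caffe
print(caffe.__file__)   # shows where the caffe module was loaded from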

1 Prepare the data

First, prepare the training and validation datasets. Here there are two classes of data, placed in folders 0 and 1 (naming the class folders 0 and 1 makes labeling convenient: the folder name itself serves as the class label). So the training set lives in /data/train/0 and /data/train/1, and the validation set in /data/val/0 and /data/val/1.
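
Relative to the working directory, the expected layout is:

data/
  train/
    0/    images of class 0
    1/    images of class 1
  val/
    0/
    1/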

Once the data is in place, create txt files that list each image file together with its label.

(1) Create train.txt for the training set

import os

f = open('train.txt', 'w')
path = os.getcwd() + '/data/train/'
for filename in os.listdir(path):            # one subfolder per class: 0, 1
    count = 0
    for file in os.listdir(path + filename):
        count += 1
        # one line per image: /<class folder>/<file name> <label>
        f.write('/' + filename + '/' + file + ' ' + filename + '\n')
    print('{} class: {}'.format(filename, count))
f.close()

(2) Create val.txt for the validation set

import os

f = open('val.txt', 'w')
path = os.getcwd() + '/data/val/'
for filename in os.listdir(path):
    count = 0
    for file in os.listdir(path + filename):
        count += 1
        f.write('/' + filename + '/' + file + ' ' + filename + '\n')
    print('{} class: {}'.format(filename, count))
f.close()
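
Since the two scripts differ only in the data directory and the output file name, they can be folded into one helper. This is just a sketch; make_list is a name introduced here, not part of Caffe:

import os

def make_list(data_root, out_txt):
    # walk data_root/<class>/ and write "/<class>/<file> <class>" lines
    with open(out_txt, 'w') as f:
        for label in sorted(os.listdir(data_root)):
            files = os.listdir(os.path.join(data_root, label))
            for name in files:
                f.write('/{0}/{1} {0}\n'.format(label, name))
            print('{} class: {}'.format(label, len(files)))

make_list(os.getcwd() + '/data/train/', 'train.txt')
make_list(os.getcwd() + '/data/val/', 'val.txt')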

Note the line format inside the txt files: /<class folder name>/<file name>, then a single space (it must not be a tab), then the class label.
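
For example, with hypothetical image names, train.txt should look like:

/0/img_0001.jpg 0
/0/img_0002.jpg 0
/1/img_0001.jpg 1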

2 Create the LMDB data files

Create createlmdb.sh, which uses the convert_imageset tool that ships with Caffe (under build/tools) to build the LMDB files. The main things to get right are the locations of the image data and of the txt files generated in the previous step, plus the RESIZE dimensions, which must match what training and testing expect later; everything else is just a matter of paths.

#!/usr/bin/env sh

CAFFE_ROOT=/home/caf/object/caffe
TOOLS=$CAFFE_ROOT/build/tools
TRAIN_DATA_ROOT=/home/caf/wk/learn/data/train
VAL_DATA_ROOT=/home/caf/wk/learn/data/val
DATA=/home/caf/wk/learn/data
EXAMPLE=/home/caf/wk/learn/data/lmdb
# Set RESIZE=true to resize the images to 227 x 227. Leave as false if images
# have already been resized using another tool.
RESIZE=true
if $RESIZE; then
  RESIZE_HEIGHT=227
  RESIZE_WIDTH=227
else
  RESIZE_HEIGHT=0
  RESIZE_WIDTH=0
fi

if [ ! -d "$TRAIN_DATA_ROOT" ]; then
  echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT"
  echo "Set the TRAIN_DATA_ROOT variable in createlmdb.sh to the path" \
       "where the training data is stored."
  exit 1
fi

if [ ! -d "$VAL_DATA_ROOT" ]; then
  echo "Error: VAL_DATA_ROOT is not a path to a directory: $VAL_DATA_ROOT"
  echo "Set the VAL_DATA_ROOT variable in createlmdb.sh to the path" \
       "where the validation data is stored."
  exit 1
fi

echo "Creating train lmdb..."

GLOG_logtostderr=1 $TOOLS/convert_imageset \
    --resize_height=$RESIZE_HEIGHT \
    --resize_width=$RESIZE_WIDTH \
    --shuffle \
    $TRAIN_DATA_ROOT \
    $DATA/train.txt \
    $EXAMPLE/face_train_lmdb

echo "Creating val lmdb..."

GLOG_logtostderr=1 $TOOLS/convert_imageset \
    --resize_height=$RESIZE_HEIGHT \
    --resize_width=$RESIZE_WIDTH \
    --shuffle \
    $VAL_DATA_ROOT \
    $DATA/val.txt \
    $EXAMPLE/face_val_lmdb

echo "Done."
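
To run it, note one assumption: convert_imageset creates the lmdb directories themselves, but their parent directory must already exist, and the script does not create $EXAMPLE:

mkdir -p /home/caf/wk/learn/data/lmdb   # parent of the two lmdb outputs
chmod +x createlmdb.sh
./createlmdb.sh
# on success, $EXAMPLE contains face_train_lmdb/ and face_val_lmdb/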


3 Define the network

Caffe takes network models as prototxt files; the syntax for defining Caffe networks is documented in detail elsewhere. This experiment uses AlexNet, saved as train_val.prototxt. Note that fc8's num_output is set to 2 to match our two classes.

name: "AlexNet"
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  data_param {
    source: "/home/caf/wk/learn/data/lmdb/face_train_lmdb"
    batch_size: 256
    backend: LMDB
  }
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  data_param {
    source: "/home/caf/wk/learn/data/lmdb/face_val_lmdb"
    batch_size: 50
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc8"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8"
  bottom: "label"
  top: "loss"
}
layer {
  name: "prob"
  type: "Softmax"
  bottom: "fc8"
  top: "prob"
}
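
As a sanity check, the net can be loaded with pycaffe and its blob shapes inspected. This sketch assumes the LMDBs from step 2 already exist, since the Data layers open them at load time:

import caffe

caffe.set_mode_cpu()
# TEST phase uses the second Data layer, i.e. face_val_lmdb with batch_size 50
net = caffe.Net('train_val.prototxt', caffe.TEST)
for name, blob in net.blobs.items():
    print('{}: {}'.format(name, blob.data.shape))
# expect data: (50, 3, 227, 227) and fc8: (50, 2)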
