视频学习来源

https://www.bilibili.com/video/av40787141?from=search&seid=17003307842787199553

笔记

Mnist分类程序

import numpy as np
from keras.datasets import mnist  #将会从网络下载mnist数据集
from keras.utils import np_utils
from keras.models import Sequential  #序列模型
from keras.layers import Dense  
from keras.optimizers import SGD

C:\Program Files (x86)\Microsoft Visual Studio\Shared\Anaconda3_64\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.

from ._conv import register_converters as _register_converters

Using TensorFlow backend.

第一次运行时,建议先下载数据集

1 打开cmd 输入python 回车

2 将上面的代码输入,导入相关文件包库

3 输入

(x_train,y_train),(x_test,y_test)=mnist.load_data()
回车
启动下载。加载完成便可在jupyter notebook中运行

# Load MNIST (downloaded automatically on first use).
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Inspect raw shapes: images are (60000, 28, 28), labels are (60000,).
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# Flatten each 28x28 image into a 784-vector (-1 lets numpy infer the
# column count) and scale pixel values from [0, 255] down to [0, 1].
x_train = x_train.reshape(len(x_train), -1) / 255.0
x_test = x_test.reshape(len(x_test), -1) / 255.0

# One-hot encode the digit labels into 10 classes.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)


# Build the model: one dense layer mapping 784 inputs to 10 outputs,
# softmax activation for classification.
# Bias values are initialized to one (the default is zero).
model = Sequential()
model.add(Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax'))

# Optimizer: stochastic gradient descent with learning rate 0.2.
sgd = SGD(lr=0.2)

# Compile with mean-squared-error loss; report accuracy during training.
model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

# Train: 60k images in mini-batches of 32; one epoch = one full pass
# over the training set, repeated 10 times.
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set and report the metrics.
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

x_shape: (60000, 28, 28)
y_shape: (60000,)
Epoch 1/10
60000/60000 [==============================] - 4s 65us/step - loss: 0.0384 - acc: 0.7673
Epoch 2/10
60000/60000 [==============================] - 5s 88us/step - loss: 0.0204 - acc: 0.8808
Epoch 3/10
60000/60000 [==============================] - 3s 57us/step - loss: 0.0178 - acc: 0.8934
Epoch 4/10
60000/60000 [==============================] - 3s 48us/step - loss: 0.0165 - acc: 0.8995
Epoch 5/10
60000/60000 [==============================] - 3s 45us/step - loss: 0.0157 - acc: 0.9038
Epoch 6/10
60000/60000 [==============================] - 3s 46us/step - loss: 0.0151 - acc: 0.9064
Epoch 7/10
60000/60000 [==============================] - 3s 48us/step - loss: 0.0146 - acc: 0.9093
Epoch 8/10
60000/60000 [==============================] - 3s 49us/step - loss: 0.0143 - acc: 0.9110
Epoch 9/10
60000/60000 [==============================] - 3s 46us/step - loss: 0.0140 - acc: 0.9126
Epoch 10/10
60000/60000 [==============================] - 3s 52us/step - loss: 0.0138 - acc: 0.9137
10000/10000 [==============================] - 0s 28us/step
 
test loss 0.01308599321632646
accuracy 0.9169

改用交叉熵

# Recompile with cross-entropy loss instead of MSE — it converges
# faster for classification; still track accuracy during training.
model.compile(
    optimizer=sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

完整代码

import numpy as np
from keras.datasets import mnist  #将会从网络下载mnist数据集
from keras.utils import np_utils
from keras.models import Sequential  #序列模型
from keras.layers import Dense  
from keras.optimizers import SGD

C:\Program Files (x86)\Microsoft Visual Studio\Shared\Anaconda3_64\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.

from ._conv import register_converters as _register_converters

Using TensorFlow backend.

# Load MNIST (downloaded automatically on first use).
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Inspect raw shapes: images are (60000, 28, 28), labels are (60000,).
print('x_shape:', x_train.shape)
print('y_shape:', y_train.shape)

# Flatten each 28x28 image into a 784-vector (-1 lets numpy infer the
# column count) and scale pixel values from [0, 255] down to [0, 1].
x_train = x_train.reshape(len(x_train), -1) / 255.0
x_test = x_test.reshape(len(x_test), -1) / 255.0

# One-hot encode the digit labels into 10 classes.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)


# Build the model: one dense layer mapping 784 inputs to 10 outputs,
# softmax activation for classification.
# Bias values are initialized to one (the default is zero).
model = Sequential()
model.add(Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax'))

# Optimizer: stochastic gradient descent with learning rate 0.2.
sgd = SGD(lr=0.2)

# Compile with cross-entropy loss (faster convergence than MSE for
# classification); report accuracy during training.
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# Train: 60k images in mini-batches of 32; one epoch = one full pass
# over the training set, repeated 10 times.
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set and report the metrics.
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

x_shape: (60000, 28, 28)

y_shape: (60000,)

Epoch 1/10

60000/60000 [==============================] - 5s 82us/step - loss: 0.3782 - acc: 0.8926

Epoch 2/10

60000/60000 [==============================] - 4s 60us/step - loss: 0.3031 - acc: 0.9145

Epoch 3/10

60000/60000 [==============================] - 3s 54us/step - loss: 0.2894 - acc: 0.9186

Epoch 4/10

60000/60000 [==============================] - 3s 55us/step - loss: 0.2826 - acc: 0.9204

Epoch 5/10

60000/60000 [==============================] - 3s 54us/step - loss: 0.2773 - acc: 0.9227

Epoch 6/10

60000/60000 [==============================] - 3s 57us/step - loss: 0.2748 - acc: 0.9226

Epoch 7/10

60000/60000 [==============================] - 4s 59us/step - loss: 0.2715 - acc: 0.9239

Epoch 8/10

60000/60000 [==============================] - 4s 61us/step - loss: 0.2694 - acc: 0.9252

Epoch 9/10

60000/60000 [==============================] - 3s 55us/step - loss: 0.2664 - acc: 0.9254

Epoch 10/10

60000/60000 [==============================] - 4s 67us/step - loss: 0.2654 - acc: 0.9267

10000/10000 [==============================] - 0s 29us/step

test loss 0.28354966330975295

accuracy 0.9229

改用交叉熵后,测试精度从 0.9169 提高到 0.9229,收敛速度也更快(注意两种损失函数的数值大小不可直接比较)