1. The general workflow for building, training, and testing a model
```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Build the model
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])

# Load the data
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

# Hold out the last 10000 training samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

# Train the model
history = model.fit(x_train, y_train, batch_size=64, epochs=3,
                    validation_data=(x_val, y_val))
print('history:')
print(history.history)

# Evaluate on the test set and run inference on a couple of samples
result = model.evaluate(x_test, y_test, batch_size=128)
print('evaluate:')
print(result)
pred = model.predict(x_test[:2])
print('predict:')
print(pred)
```
2. Custom losses and metrics
To define a custom metric, subclass keras.metrics.Metric and override the following methods:
__init__(self): initialize the state variables.
update_state(self, y_true, y_pred, sample_weight=None): use the targets y_true and the model predictions y_pred to update the state variables.
result(self): use the state variables to compute the final result.
reset_states(self): reinitialize the state of the metric.
```python
# A simple example showing how to implement a CategoricalTruePositives metric,
# which counts how many samples were correctly classified as belonging to a given class.
class CategoricalTruePositives(keras.metrics.Metric):
    def __init__(self, name='categorical_true_positives', **kwargs):
        super(CategoricalTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Reduce the softmax output to class predictions before comparing
        y_pred = tf.argmax(y_pred, axis=-1)
        values = tf.equal(tf.cast(y_pred, tf.int32),
                          tf.reshape(tf.cast(y_true, tf.int32), [-1]))
        values = tf.cast(values, tf.float32)
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, tf.float32)
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))

    def result(self):
        return tf.identity(self.true_positives)

    def reset_states(self):
        self.true_positives.assign(0.)


model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[CategoricalTruePositives()])

model.fit(x_train, y_train, batch_size=64, epochs=3)
```
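Custom losses work along the same lines: compile() accepts any callable with the signature (y_true, y_pred) that returns a per-sample loss (or a keras.losses.Loss subclass). The sketch below is not from the original post; it hand-rolls the negative log-likelihood of the true class, which for this softmax model is equivalent to sparse categorical crossentropy:

```python
def sparse_nll(y_true, y_pred):
    # Negative log-likelihood of the true class, assuming y_pred is a softmax output.
    y_true = tf.reshape(tf.cast(y_true, tf.int32), [-1])
    # Pick out each sample's predicted probability for its true class
    probs = tf.gather(y_pred, y_true, batch_dims=1)
    return -tf.math.log(probs + keras.backend.epsilon())

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=sparse_nll,
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=64, epochs=1)
```

Keras averages the returned per-sample values into the scalar batch loss.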
```python
# Add a loss to the network by writing it as a layer
class ActivityRegularizationLayer(layers.Layer):
    def call(self, inputs):
        self.add_loss(tf.reduce_sum(inputs) * 0.1)
        return inputs

inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = ActivityRegularizationLayer()(h1)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=32, epochs=1)
```
```python
# A metric to track can likewise be added from inside a layer
class MetricLoggingLayer(layers.Layer):
    def call(self, inputs):
        self.add_metric(keras.backend.std(inputs),
                        name='std_of_activation',
                        aggregation='mean')
        return inputs

inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h1 = MetricLoggingLayer()(h1)
h1 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h1)
model = keras.Model(inputs, outputs)
# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=32, epochs=1)
```
```python
# Losses and metrics can also be added directly on the model
inputs = keras.Input(shape=(784,), name='mnist_input')
h1 = layers.Dense(64, activation='relu')(inputs)
h2 = layers.Dense(64, activation='relu')(h1)
outputs = layers.Dense(10, activation='softmax')(h2)
model = keras.Model(inputs, outputs)

model.add_metric(keras.backend.std(inputs),
                 name='std_of_activation',
                 aggregation='mean')
model.add_loss(tf.reduce_sum(h1) * 0.1)

# keras.utils.plot_model(model, 'net001.png', show_shapes=True)

model.compile(optimizer=keras.optimizers.RMSprop(),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.fit(x_train, y_train, batch_size=32, epochs=1)
```
Besides passing validation data with validation_data, you can also use validation_split to carve a validation set out of the training data.
Note: validation_split can only be used when training with NumPy data.
```python
model.fit(x_train, y_train, batch_size=32, epochs=1, validation_split=0.2)
```
3. Building input data with tf.data
```python
def get_compiled_model():
    inputs = keras.Input(shape=(784,), name='mnist_input')
    h1 = layers.Dense(64, activation='relu')(inputs)
    h2 = layers.Dense(64, activation='relu')(h1)
    outputs = layers.Dense(10, activation='softmax')(h2)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer=keras.optimizers.RMSprop(),
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[keras.metrics.SparseCategoricalAccuracy()])
    return model

model = get_compiled_model()
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)

# model.fit(train_dataset, epochs=3)
# steps_per_epoch: how many steps to train per epoch
# validation_steps: how many steps to run each time validation happens
model.fit(train_dataset, epochs=3, steps_per_epoch=100,
          validation_data=val_dataset, validation_steps=3)
```
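Although the original post stops at fit(), tf.data datasets can be fed to evaluate() and predict() in the same way. A quick sketch, reusing x_test and y_test loaded in section 1:

```python
# A labeled dataset for evaluation
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)
print(model.evaluate(test_dataset))

# An unlabeled dataset for prediction
pred_dataset = tf.data.Dataset.from_tensor_slices(x_test).batch(128)
pred = model.predict(pred_dataset)
print(pred.shape)  # (10000, 10)
```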
4. Sample weights and class weights
A "sample weights" array is an array of numbers that specify how much weight each sample in a batch should have when computing the total loss. It is commonly used in imbalanced classification problems (the idea being to give more weight to rarely-seen classes). When the weights used are ones and zeros, the array can serve as a mask for the loss function (entirely discarding certain samples' contribution to the total loss).
A "class weights" dict is a more specific instance of the same concept: it maps class indices to the sample weight that should be used for samples belonging to that class. For example, if class "0" is half as represented as class "1" in your data, you could use class_weight = {0: 1., 1: 0.5}.
```python
# Give class 5 extra weight
import numpy as np

# Class weights
model = get_compiled_model()
class_weight = {i: 1.0 for i in range(10)}
class_weight[5] = 2.0
print(class_weight)
model.fit(x_train, y_train,
          class_weight=class_weight,
          batch_size=64,
          epochs=4)

# Sample weights
model = get_compiled_model()
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
model.fit(x_train, y_train,
          sample_weight=sample_weight,
          batch_size=64,
          epochs=4)
```
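When training from a tf.data.Dataset, the sample_weight argument of fit() is not supported; instead the dataset itself should yield (inputs, targets, sample_weights) tuples. A sketch of that variant, reusing get_compiled_model from section 3:

```python
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0

# The third tuple element is interpreted as per-sample weights
train_dataset = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train, sample_weight))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

model = get_compiled_model()
model.fit(train_dataset, epochs=1)
```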
5. Multi-input, multi-output models
```python
image_input = keras.Input(shape=(32, 32, 3), name='img_input')
timeseries_input = keras.Input(shape=(None, 10), name='ts_input')

x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)

x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)

x = layers.concatenate([x1, x2])

score_output = layers.Dense(1, name='score_output')(x)
class_output = layers.Dense(5, activation='softmax', name='class_output')(x)

model = keras.Model(inputs=[image_input, timeseries_input],
                    outputs=[score_output, class_output])
keras.utils.plot_model(model, 'multi_input_output_model.png',
                       show_shapes=True)
```
```python
# Each output can get its own loss and metrics
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[keras.losses.MeanSquaredError(),
          keras.losses.CategoricalCrossentropy()])

# The losses can also be weighted (note the argument is loss_weights)
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={'score_output': keras.losses.MeanSquaredError(),
          'class_output': keras.losses.CategoricalCrossentropy()},
    metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(),
                              keras.metrics.MeanAbsoluteError()],
             'class_output': [keras.metrics.CategoricalAccuracy()]},
    loss_weights={'score_output': 2., 'class_output': 1.})

# An output that should not contribute to training can get a None loss
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss=[None, keras.losses.CategoricalCrossentropy()])

# Or the dict version
model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={'class_output': keras.losses.CategoricalCrossentropy()})
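The original post does not show how to fit such a model; lists of inputs/targets are matched to the model's inputs/outputs by position, dicts by the input/output names. A sketch with random placeholder arrays (shaped to match the two inputs and two outputs above):

```python
import numpy as np

# Hypothetical random data, just to illustrate the fit() call shapes
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))

model.compile(
    optimizer=keras.optimizers.RMSprop(1e-3),
    loss={'score_output': keras.losses.MeanSquaredError(),
          'class_output': keras.losses.CategoricalCrossentropy()})

# Lists: matched by position
model.fit([img_data, ts_data], [score_targets, class_targets],
          batch_size=32, epochs=1)
# Dicts: matched by name
model.fit({'img_input': img_data, 'ts_input': ts_data},
          {'score_output': score_targets, 'class_output': class_targets},
          batch_size=32, epochs=1)
```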
6. Using callbacks
Callbacks in Keras are objects that get called at different points during training (at the start of an epoch, at the end of a batch, at the end of an epoch, and so on). They can be used to implement behaviors such as:
Running validation at extra points during training (beyond the built-in per-epoch validation)
Checkpointing the model at regular intervals or once it exceeds a certain accuracy threshold
Changing the learning rate of the model when training seems to be plateauing
Fine-tuning the top layers when training seems to be plateauing
Sending email or instant-message notifications when training ends or when a certain performance threshold is exceeded, and so on.
Available built-in callbacks include:
ModelCheckpoint: periodically saves the model.
EarlyStopping: stops training when it no longer improves the validation metrics.
TensorBoard: periodically writes model logs that can be visualized in TensorBoard (see "Visualization" for details).
CSVLogger: streams loss and metric data to a CSV file (a sketch follows this list).
and so on.
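The original post gives no CSVLogger example, so here is a minimal sketch, assuming get_compiled_model from section 3 and a hypothetical output file training_log.csv:

```python
model = get_compiled_model()
# Write one row of loss/metric values per epoch to training_log.csv
csv_logger = keras.callbacks.CSVLogger('training_log.csv', append=False)
model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=[csv_logger],
          validation_split=0.2)
```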
6.1 Using callbacks
```python
model = get_compiled_model()

callbacks = [
    keras.callbacks.EarlyStopping(
        # Stop training when `val_loss` is no longer improving
        monitor='val_loss',
        # "no longer improving" being defined as "no better than 1e-2 less"
        min_delta=1e-2,
        # "no longer improving" being further defined as "for at least 2 epochs"
        patience=2,
        verbose=1)
]
model.fit(x_train, y_train,
          epochs=20,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2)
```
```python
# Model checkpointing callback
model = get_compiled_model()
check_callback = keras.callbacks.ModelCheckpoint(
    filepath='mymodel_{epoch}.h5',
    save_best_only=True,
    monitor='val_loss',
    verbose=1
)

model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=[check_callback],
          validation_split=0.2)
```
```python
# Decay the learning rate on a fixed schedule
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=10000,
    decay_rate=0.96,
    staircase=True
)
# Pass this optimizer to model.compile(...) to use the schedule
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
```
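A fixed schedule cannot react to training signals. To lower the learning rate when training plateaus, as mentioned in the list above, the built-in ReduceLROnPlateau callback can be used instead; a sketch assuming get_compiled_model from section 3:

```python
model = get_compiled_model()
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',  # watch the validation loss
    factor=0.5,          # halve the learning rate...
    patience=2,          # ...after 2 epochs without improvement
    verbose=1)
model.fit(x_train, y_train,
          epochs=10,
          batch_size=64,
          callbacks=[reduce_lr],
          validation_split=0.2)
```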
```python
# Using TensorBoard
tensorboard_cbk = keras.callbacks.TensorBoard(log_dir='./full_path_to_your_logs')
model.fit(x_train, y_train,
          epochs=5,
          batch_size=64,
          callbacks=[tensorboard_cbk],
          validation_split=0.2)
```
6.2 Writing your own callback
```python
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs.get('loss'))
        print('\nloss:', self.losses[-1])

model = get_compiled_model()

callbacks = [
    LossHistory()
]
model.fit(x_train, y_train,
          epochs=3,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2)
```
7. Writing your own training and evaluation loops
```python
# Get the model.
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)

# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy()

# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Write the training loop yourself
for epoch in range(3):
    print('epoch: ', epoch)
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape; the forward pass and loss must be recorded
        # inside it so gradients can be computed
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s'
                  % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * batch_size))
```
```python
# Train and validate
# Get model
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)

# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy()

# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()

# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)


# Iterate over epochs.
for epoch in range(3):
    print('Start of epoch %d' % (epoch,))

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Update training metric.
        train_acc_metric(y_batch_train, logits)

        # Log every 200 batches.
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s'
                  % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * 64))

    # Display metrics at the end of each epoch.
    train_acc = train_acc_metric.result()
    print('Training acc over epoch: %s' % (float(train_acc),))
    # Reset training metrics at the end of each epoch
    train_acc_metric.reset_states()

    # Run a validation loop at the end of each epoch.
    for x_batch_val, y_batch_val in val_dataset:
        val_logits = model(x_batch_val)
        # Update val metrics
        val_acc_metric(y_batch_val, val_logits)
    val_acc = val_acc_metric.result()
    val_acc_metric.reset_states()
    print('Validation acc: %s' % (float(val_acc),))
```
```python
# Adding your own losses; model.losses only holds the losses created
# during the most recent forward pass
class ActivityRegularizationLayer(layers.Layer):
    def call(self, inputs):
        self.add_loss(1e-2 * tf.reduce_sum(inputs))
        return inputs

inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)

model = keras.Model(inputs=inputs, outputs=outputs)
logits = model(x_train[:64])
print(model.losses)
logits = model(x_train[:64])
logits = model(x_train[64:128])
logits = model(x_train[128:192])
print(model.losses)
```
```python
# Include the extra losses in the gradient computation
optimizer = keras.optimizers.SGD(learning_rate=1e-3)

for epoch in range(3):
    print('Start of epoch %d' % (epoch,))

    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)
            loss_value = loss_fn(y_batch_train, logits)

            # Add extra losses created during this forward pass:
            loss_value += sum(model.losses)

        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Log every 200 batches.
        if step % 200 == 0:
            print('Training loss (for one batch) at step %s: %s'
                  % (step, float(loss_value)))
            print('Seen so far: %s samples' % ((step + 1) * 64))
```
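One closing note that is not in the original post: the per-batch work can usually be wrapped in a function decorated with @tf.function, which compiles it into a graph and typically runs much faster than the eager loop above. A minimal sketch, reusing the model, loss_fn, optimizer, and train_dataset defined earlier:

```python
@tf.function
def train_step(x, y):
    # Same body as the eager loop, but traced into a graph once
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss_value = loss_fn(y, logits)
        loss_value += sum(model.losses)
    grads = tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss_value

for epoch in range(3):
    for step, (x_batch, y_batch) in enumerate(train_dataset):
        loss_value = train_step(x_batch, y_batch)
```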