接触过深度学习的人一定听过 Keras。为了方便学习,接下来将仔细地讲解如何使用 Keras 库构建 1D-CNN 深度学习模型。

from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Dense,Reshape
from keras.optimizers import RMSprop
import warnings
warnings.filterwarnings("ignore")

模式一:Sequential(顺序模型)写法

import numpy as np

# Hyper-parameters.
# `max_features` was referenced below but never defined (NameError as
# originally written). 10000 matches the pasted summary: the Embedding
# layer shows 10000 * 500 = 5,000,000 parameters.
max_features = 10000  # vocabulary size (Embedding input_dim)

# Synthetic data: 1200 sequences of 100 token ids each.
x_train = np.random.randint(100, size=(1200, 100))
# Binary labels in {0, 1}: the original labels in [0, 100) are invalid
# targets for binary_crossentropy and drove the loss negative in the
# pasted training log.
y_train = np.random.randint(2, size=(1200, 1))

model = Sequential()
# Input (batch, 100) of token ids -> output (batch, 100, 500) embeddings.
model.add(Embedding(max_features, 500, input_length=len(x_train[1])))
model.add(Conv1D(32, 7, activation='relu'))   # -> (batch, 94, 32)
model.add(MaxPooling1D(5))                    # -> (batch, 18, 32)
model.add(Conv1D(32, 7, activation='relu'))   # -> (batch, 12, 32)
model.add(GlobalMaxPooling1D())               # -> (batch, 32)
# Sigmoid keeps the output in (0, 1) as required by binary_crossentropy;
# the original linear Dense(1) is another cause of the negative loss.
model.add(Dense(1, activation='sigmoid'))

model.summary()

model.compile(optimizer=RMSprop(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])

history = model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=10,
                    validation_split=0.2)
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 100, 500)          5000000   
_________________________________________________________________
conv1d_1 (Conv1D)            (None, 94, 32)            112032    
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 18, 32)            0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, 12, 32)            7200      
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 32)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 33        
=================================================================
Total params: 5,119,265
Trainable params: 5,119,265
Non-trainable params: 0
_________________________________________________________________
Train on 960 samples, validate on 240 samples
Epoch 1/10
960/960 [==============================] - 17s 17ms/step - loss: -108.8848 - acc: 0.0063 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 2/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 3/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 4/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 5/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 6/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 7/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 8/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 9/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 10/10
960/960 [==============================] - 2s 2ms/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042

模式二:函数式 API(Model)写法

from keras import Input, Model
from keras.layers import Dense,Conv1D,Embedding,MaxPool1D

class oneDCNN:
    """1D-CNN model builder using the Keras functional API.

    Architecture: Embedding -> Conv1D -> MaxPool1D -> Conv1D ->
    GlobalMaxPooling1D -> Dense(1).
    """

    def __init__(self, maxlen, max_features, embedding_dims,
                 last_activation='softmax'):
        # maxlen: length of each input sequence (number of token ids).
        self.maxlen = maxlen
        # max_features: vocabulary size (Embedding input_dim).
        self.max_features = max_features
        # embedding_dims: dimensionality of each embedding vector.
        self.embedding_dims = embedding_dims
        # NOTE(review): last_activation is stored but never applied to the
        # output layer in get_model() — presumably intended for the final
        # Dense; confirm before relying on it.
        self.last_activation = last_activation

    def get_model(self):
        """Assemble and return the (uncompiled) Keras Model."""
        # Renamed from `input`, which shadowed the builtin.
        inputs = Input((self.maxlen,))
        embedding = Embedding(self.max_features, self.embedding_dims,
                              input_length=self.maxlen)(inputs)
        c1 = Conv1D(32, 7, activation='relu')(embedding)
        mp1 = MaxPool1D(5)(c1)
        c2 = Conv1D(32, 7, activation='relu')(mp1)
        pooled = GlobalMaxPooling1D()(c2)
        # Linear single-unit output, exactly as the original block built it.
        output = Dense(1)(pooled)
        return Model(inputs=inputs, outputs=output)
        
# Build the functional-API model (sequence length 100, vocab 100,
# 500-dim embeddings), then compile and fit it on the same data.
model = oneDCNN(maxlen=100, max_features=100, embedding_dims=500).get_model()
model.summary()

optimizer = RMSprop(lr=1e-4)
model.compile(
    optimizer=optimizer,
    loss='binary_crossentropy',
    metrics=['acc'],
)

# Hold out the last 20% of the samples for validation.
history = model.fit(
    x_train,
    y_train,
    epochs=10,
    batch_size=10,
    validation_split=0.2,
)
Model: "model_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_5 (InputLayer)         (None, 100)               0         
_________________________________________________________________
embedding_3 (Embedding)      (None, 100, 500)          50000     
_________________________________________________________________
conv1d_5 (Conv1D)            (None, 94, 32)            112032    
_________________________________________________________________
max_pooling1d_3 (MaxPooling1 (None, 18, 32)            0         
_________________________________________________________________
conv1d_6 (Conv1D)            (None, 12, 32)            7200      
_________________________________________________________________
global_max_pooling1d_4 (Glob (None, 32)                0         
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 33        
=================================================================
Total params: 169,265
Trainable params: 169,265
Non-trainable params: 0
_________________________________________________________________
Train on 960 samples, validate on 240 samples
Epoch 1/10
960/960 [==============================] - 1s 964us/step - loss: 89.8610 - acc: 0.0094 - val_loss: -54.5870 - val_acc: 0.0042
Epoch 2/10
960/960 [==============================] - 1s 732us/step - loss: -682.0644 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 3/10
960/960 [==============================] - 1s 706us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 4/10
960/960 [==============================] - 1s 676us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 5/10
960/960 [==============================] - 1s 666us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 6/10
960/960 [==============================] - 1s 677us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 7/10
960/960 [==============================] - 1s 728us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 8/10
960/960 [==============================] - 1s 694us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 9/10
960/960 [==============================] - 1s 721us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042
Epoch 10/10
960/960 [==============================] - 1s 729us/step - loss: -748.9009 - acc: 0.0052 - val_loss: -762.5254 - val_acc: 0.0042