A Hands-On Guide to EEG Signal Recognition with a Hybrid 1D-Convolution and LSTM Model
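This walkthrough builds a hybrid 1D-convolution + LSTM classifier in Keras for a five-class EEG dataset (11,500 segments of 178 samples each), trains it with a stepped learning-rate schedule, and plots the learning curves. We start by loading and preprocessing the data.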
import pandas as pd
import numpy as np

# Load the dataset: 11500 EEG segments, each 178 samples long, with labels 1-5
data = "data.csv"
df = pd.read_csv(data, header=0, index_col=0)
df1 = df.drop(["y"], axis=1)
lbls = df["y"].values - 1  # shift labels from 1..5 to 0..4

# Copy the feature rows into a NumPy array
wave = np.zeros((11500, 178))
z = 0
for index, row in df1.iterrows():
    wave[z, :] = row
    z += 1

# Standardize each of the 178 time steps to zero mean and unit variance
# (statistics computed across all 11500 segments)
mean = wave.mean(axis=0)
wave -= mean
std = wave.std(axis=0)
wave /= std

# One-hot encode the five class labels
def one_hot(y):
    lbl = np.zeros(5)
    lbl[y] = 1
    return lbl

target = []
for value in lbls:
    target.append(one_hot(value))
target = np.array(target)

# Add a channel dimension: (11500, 178) -> (11500, 178, 1)
wave = np.expand_dims(wave, axis=-1)
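As an aside, the row-by-row copy and the one-hot loop above can each be collapsed into a single vectorized call; a minimal equivalent sketch (using the same df1 and lbls as above):

wave = df1.values.astype("float64")  # (11500, 178), replaces the iterrows() loop
target = np.eye(5)[lbls]             # (11500, 5) one-hot matrix, replaces one_hot()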
from keras.models import Sequential
from keras import layers

model = Sequential()
# Convolutional front end: downsample the 178-step sequence and extract local features
model.add(layers.Conv1D(64, 15, strides=2, input_shape=(178, 1), use_bias=False))
model.add(layers.ReLU())
model.add(layers.Conv1D(64, 3))
model.add(layers.Conv1D(64, 3, strides=2))
model.add(layers.ReLU())
model.add(layers.Conv1D(64, 3))
model.add(layers.Conv1D(64, 3, strides=2))  # output shape: (None, 18, 64)
model.add(layers.BatchNormalization())
# Recurrent back end: model temporal structure in the extracted feature sequence
model.add(layers.LSTM(64, dropout=0.5, return_sequences=True))
model.add(layers.LSTM(64, dropout=0.5, return_sequences=True))
model.add(layers.LSTM(32))
model.add(layers.Dense(5, activation="softmax"))
model.summary()
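With the default 'valid' padding, the five convolutions shrink the 178-step input to 18 steps before the LSTM stack. If you want to confirm this yourself, a quick check over the built model:

# Print every layer's output shape to verify the downsampling
for layer in model.layers:
    print(layer.name, layer.output_shape)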
import os
import matplotlib.pyplot as plt
import keras
import keras.backend as K
from keras.callbacks import LearningRateScheduler

# Resume from an earlier checkpoint if one exists
save_path = './keras_model.h5'
if os.path.isfile(save_path):
    model.load_weights(save_path)
    print('reloaded.')

adam = keras.optimizers.Adam()
model.compile(optimizer=adam,
              loss="categorical_crossentropy", metrics=["acc"])
# Learning-rate schedule: halve the learning rate every 100 epochs
def lr_scheduler(epoch):
    if epoch % 100 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

lrate = LearningRateScheduler(lr_scheduler)
history = model.fit(wave, target, epochs=400,
                    batch_size=128, validation_split=0.2,
                    verbose=1, callbacks=[lrate])
model.save_weights(save_path)
print(history.history.keys())

# Accuracy curves for the training and validation splits
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# Loss curves for the training and validation splits
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
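Below is the complete script for a second, more heavily regularized variant of the network. It inserts BatchNormalization and Dropout after the first convolution block, adds a Dropout layer before the softmax classifier, and saves its weights to a separate checkpoint file.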
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import keras
import keras.backend as K
from keras.models import Sequential
from keras import layers
from keras.callbacks import LearningRateScheduler

# Same preprocessing as before: load, standardize, one-hot encode, add a channel dim
data = "data.csv"
df = pd.read_csv(data, header=0, index_col=0)
df1 = df.drop(["y"], axis=1)
lbls = df["y"].values - 1

wave = np.zeros((11500, 178))
z = 0
for index, row in df1.iterrows():
    wave[z, :] = row
    z += 1

mean = wave.mean(axis=0)
wave -= mean
std = wave.std(axis=0)
wave /= std

def one_hot(y):
    lbl = np.zeros(5)
    lbl[y] = 1
    return lbl

target = []
for value in lbls:
    target.append(one_hot(value))
target = np.array(target)
wave = np.expand_dims(wave, axis=-1)
model = Sequential()
model.add(layers.Conv1D(64, 15, strides=2,
                        input_shape=(178, 1), use_bias=False))
model.add(layers.ReLU())
model.add(layers.Conv1D(64, 3))
model.add(layers.Conv1D(64, 3, strides=2))
model.add(layers.BatchNormalization())  # added vs. the first model
model.add(layers.Dropout(0.5))          # added vs. the first model
model.add(layers.Conv1D(64, 3))
model.add(layers.Conv1D(64, 3, strides=2))
model.add(layers.BatchNormalization())
model.add(layers.LSTM(64, dropout=0.5, return_sequences=True))
model.add(layers.LSTM(64, dropout=0.5, return_sequences=True))
model.add(layers.LSTM(32))
model.add(layers.Dropout(0.5))          # added vs. the first model
model.add(layers.Dense(5, activation="softmax"))
model.summary()
# Resume from an earlier checkpoint if one exists
save_path = './keras_model3.h5'
if os.path.isfile(save_path):
    model.load_weights(save_path)
    print('reloaded.')

adam = keras.optimizers.Adam()
model.compile(optimizer=adam,
              loss="categorical_crossentropy", metrics=["acc"])
# Learning-rate schedule: halve the learning rate every 100 epochs
def lr_scheduler(epoch):
    if epoch % 100 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        K.set_value(model.optimizer.lr, lr * 0.5)
        print("lr changed to {}".format(lr * 0.5))
    return K.get_value(model.optimizer.lr)

lrate = LearningRateScheduler(lr_scheduler)
history = model.fit(wave, target, epochs=400,
                    batch_size=128, validation_split=0.2,
                    verbose=2, callbacks=[lrate])
model.save_weights(save_path)
print(history.history.keys())

# Accuracy curves for the training and validation splits
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# Loss curves for the training and validation splits
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
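After training, overall accuracy alone can hide per-class behavior, which matters for a five-class EEG task. A minimal confusion-matrix sketch (assuming scikit-learn is installed; with validation_split=0.2, Keras holds out the last 20% of the arrays, i.e. the last 2300 rows):

from sklearn.metrics import confusion_matrix

# The held-out validation rows are the tail of the arrays passed to fit()
val_x, val_y = wave[-2300:], target[-2300:]
pred = model.predict(val_x)
print(confusion_matrix(val_y.argmax(axis=1), pred.argmax(axis=1)))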