For full details, see: personal article
The code used to train the classifiers can be found on GitHub at:
Link
PhysioNet-Haaglanden
150 of the recordings were used (No. 98 was excluded because the length of its label data did not match the signal).
Following YASA's feature definitions, 84 features (28 × 3) were extracted per EEG channel, 69 features (23 × 3) per EOG channel, and 30 features (10 × 3) for the EMG channel.
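The classifier experiments below assume a per-epoch feature matrix X and one-hot stage labels y. A minimal sketch of how these could be assembled for one recording, assuming YASA's SleepStaging.fit()/get_features() accessors; the helper name, label coding, and channel handling here are illustrative rather than taken from the original code:

import mne
import numpy as np
import pandas as pd
import yasa
from tensorflow.keras.utils import to_categorical

def extract_features(edf_file, txt_file, eeg_channels):
    # Hypothetical helper: concatenate YASA's per-epoch features for several channels
    raw = mne.io.read_raw_edf(edf_file, preload=True)
    blocks = []
    for ch in eeg_channels:
        sls = yasa.SleepStaging(raw, eeg_name=ch)   # one staging object per EEG channel
        sls.fit()                                   # compute the ~84 features (28 x 3) for this channel
        blocks.append(sls.get_features().add_prefix(ch + "_").reset_index(drop=True))
    X = pd.concat(blocks, axis=1).to_numpy()
    # Human-scored hypnogram: one stage label per 30-s epoch
    stages = np.loadtxt(txt_file, dtype=str)
    mapping = {"W": 0, "N1": 1, "N2": 2, "N3": 3, "R": 4}   # assumed label coding
    y = to_categorical([mapping[s] for s in stages], num_classes=5)
    return X, y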
Channels used: eeg_name="EEG C4-M1", eog_name="EOG E1-M2", emg_name="EMG chin"
Accuracy: 73.62%
import mne
import numpy as np
import yasa

def calculate_accuracy(edf_file, txt_file):
    # Load the EDF recording
    raw = mne.io.read_raw_edf(edf_file, preload=True)
    # Load the human-scored hypnogram; each value corresponds to one 30-s epoch
    hypno = np.loadtxt(txt_file, dtype=str)
    # Automatic staging with YASA using the EEG, EOG and EMG channels
    sls = yasa.SleepStaging(raw, eeg_name="EEG C4-M1", eog_name="EOG E1-M2", emg_name="EMG chin")
    y_pred = sls.predict()
    # Accuracy of the prediction compared with the human scoring
    accuracy = (hypno == y_pred).sum() / y_pred.size
    return accuracy
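The reported figure is presumably the mean of the per-recording accuracies over the 150 selected recordings; a minimal usage sketch (the directory layout and file naming below are hypothetical):

import glob
import os

accuracies = []
for edf_file in sorted(glob.glob("recordings/*.edf")):      # hypothetical directory layout
    txt_file = os.path.splitext(edf_file)[0] + ".txt"        # hypothetical naming: same stem, .txt hypnogram
    accuracies.append(calculate_accuracy(edf_file, txt_file))
print(f"Mean accuracy over {len(accuracies)} recordings: {np.mean(accuracies):.2%}")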
Channel used: eeg_name="EEG C4-M1" (EEG only)
Accuracy: 72.79%
def calculate_accuracy(edf_file, txt_file):
    # Load the EDF recording
    raw = mne.io.read_raw_edf(edf_file, preload=True)
    # Load the human-scored hypnogram; each value corresponds to one 30-s epoch
    hypno = np.loadtxt(txt_file, dtype=str)
    # EEG-only staging (EOG and EMG channels omitted)
    # sls = yasa.SleepStaging(raw, eeg_name="EEG C4-M1", eog_name="EOG E1-M2", emg_name="EMG chin")
    sls = yasa.SleepStaging(raw, eeg_name="EEG C4-M1")
    y_pred = sls.predict()
    # Accuracy of the prediction compared with the human scoring
    accuracy = (hypno == y_pred).sum() / y_pred.size
    return accuracy
Channel: EEG C4-M1 (dense neural network, 84 features)
Accuracy: 77.23%
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Build the model
model = Sequential()
model.add(Dense(128, input_shape=(84,), activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(5, activation='softmax'))  # 5 sleep-stage classes
# Compile and train the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=500, batch_size=32, validation_data=(X_test, y_test))
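After training, the reported accuracy can be read off from the held-out split; a short evaluation sketch:

# Evaluate on the held-out 20% split
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Test accuracy: {acc:.2%}")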
Channels: 'EEG F4-M1', 'EEG C4-M1', 'EEG O2-M1', 'EEG C3-M2' (dense neural network, 336 features)
Accuracy: 79.26%
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Build the model (4 EEG channels x 84 features = 336 inputs)
model = Sequential()
model.add(Dense(128, input_shape=(336,), activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(5, activation='softmax'))  # 5 sleep-stage classes
# Compile and train the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=500, batch_size=32, validation_data=(X_test, y_test))
Channels: 'EEG F4-M1', 'EEG C4-M1', 'EEG O2-M1', 'EEG C3-M2' (LightGBM, 336 features)
Accuracy: 82.62%
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score

# Create the LightGBM classifier
lgbm = LGBMClassifier(n_estimators=100, learning_rate=0.05)
# Convert the one-hot labels back to integer class indices
y_train_1d = np.argmax(y_train, axis=1)
y_test_1d = np.argmax(y_test, axis=1)
# Train on the 1-D label array
lgbm.fit(X_train, y_train_1d)
# Predict and evaluate
y_pred = lgbm.predict(X_test)
accuracy = accuracy_score(y_test_1d, y_pred)
print(f"Accuracy: {accuracy}")
Channels: 'EEG F4-M1', 'EEG C4-M1', 'EEG O2-M1', 'EEG C3-M2', 'EMG chin', 'EOG E1-M2', 'EOG E2-M2' (dense neural network, 504 features)
Accuracy: 81.24%
# Build the model (7 channels, 504 input features; training step sketched below)
model = Sequential()
model.add(Dense(128, input_shape=(504,), activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(5, activation='softmax'))  # 5 sleep-stage classes
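The training call is not shown in the original block; presumably it mirrors the earlier dense models. A hedged completion (epochs and batch size copied from those runs, not confirmed for this experiment):

# Assumed to match the earlier MLP runs; not confirmed for this experiment
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=500, batch_size=32, validation_data=(X_test, y_test))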
Channels: 'EEG F4-M1', 'EEG C4-M1', 'EEG O2-M1', 'EEG C3-M2', 'EMG chin', 'EOG E1-M2', 'EOG E2-M2' (LSTM, 504 features)
Accuracy: 81.68%
from tensorflow.keras.layers import LSTM

def build_lstm_model(input_shape, num_classes):
    model = Sequential()
    # A single LSTM layer; 100 units were used instead of the initial 50
    # model.add(LSTM(50, input_shape=input_shape))
    model.add(LSTM(100, input_shape=input_shape))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
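Keras LSTM layers expect 3-D input of shape (samples, timesteps, features), and the original does not show how the per-epoch feature vectors were arranged into sequences. A hedged usage sketch that treats each 30-s epoch as a length-1 sequence of its 504 features (the actual sequence construction and training settings may differ):

# Hedged: reshape (n_epochs, 504) into (n_epochs, 1, 504); training settings assumed
X_train_seq = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
X_test_seq = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
lstm_model = build_lstm_model(input_shape=(1, X_train.shape[1]), num_classes=5)
lstm_model.fit(X_train_seq, y_train, epochs=500, batch_size=32,
               validation_data=(X_test_seq, y_test))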
Channels: 'EEG F4-M1', 'EEG C4-M1', 'EEG O2-M1', 'EEG C3-M2', 'EMG chin', 'EOG E1-M2', 'EOG E2-M2' (LightGBM, 504 features)
Accuracy: 84.22%
# Create the LightGBM classifier (fitted and evaluated as in the earlier LightGBM block)
lgbm = LGBMClassifier(n_estimators=100, learning_rate=0.05)
Channels: 'EEG F4-M1', 'EEG C4-M1', 'EEG O2-M1', 'EEG C3-M2', 'EMG chin', 'EOG E1-M2', 'EOG E2-M2' (1-D CNN, 504 features)
Accuracy: 86.50%
import tensorflow as tf

# The features are 1-D vectors, so add a channel axis for the Conv1D layers
X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]
# Build the model
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(504, 1)),
    tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.MaxPooling1D(pool_size=2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(5, activation='softmax')
])
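Compilation and training are not shown for the CNN; a hedged completion, assuming the same one-hot labels and training settings as the earlier models:

# Assumed training setup; the original block stops after model construction
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=500, batch_size=32, validation_data=(X_test, y_test))
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Test accuracy: {acc:.2%}")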