some update to make transformer model work #14

Open · wants to merge 2 commits into base: master
60 changes: 31 additions & 29 deletions Download_Raw_EEG_Data/MIND_Get_EDF.py

@@ -4,49 +4,51 @@
 import requests
 from bs4 import BeautifulSoup


 def download_file(url, index):
     local_filename = url.split('/')[-1]
     # NOTE the stream=True parameter
     r = requests.get(url, stream=True)
     with open(local_filename, 'wb') as f:
         for chunk in r.iter_content(chunk_size=1024):
             if chunk:  # filter out keep-alive new chunks
                 f.write(chunk)
                 f.flush()
     return local_filename


 for i in range(1, 110):

     if i < 10:
-        root_link="https://archive.physionet.org/pn4/eegmmidb/S00" + str(i) + "/"
+        root_link = "https://archive.physionet.org/pn4/eegmmidb/S00" + str(i) + "/"

-    elif i >= 10 and i < 100 :
+    elif i >= 10 and i < 100:
         root_link = "https://archive.physionet.org/pn4/eegmmidb/S0" + str(i) + "/"

     else:
         root_link = "https://archive.physionet.org/pn4/eegmmidb/S" + str(i) + "/"

-    r=requests.get(root_link)
+    r = requests.get(root_link)

-    if r.status_code==200:
-        soup=BeautifulSoup(r.text, features="html.parser")
+    if r.status_code == 200:
+        soup = BeautifulSoup(r.text, features="html.parser")
         # print soup.prettify()

-        index=1
+        index = 1
         for link in soup.find_all('a'):
-            new_link=root_link+link.get('href')
+            new_link = root_link + link.get('href')

             if new_link.endswith(".edf"):
-                file_path=download_file(new_link,str(index))
-                print("downloading:"+new_link+" -> "+file_path)
-                index+=1
+                file_path = download_file(new_link, str(index))
+                print("downloading:" + new_link + " -> " + file_path)
+                index += 1

             # if new_link.endswith(".edf.event"):
             #     file_path = download_file(new_link, str(index))
             #     print("downloading:" + new_link + " -> " + file_path)
             #     index += 1

         print("all download finished")

     else:
         print("errors occur.")
4 changes: 3 additions & 1 deletion Models/Evaluation_Metrics/Metrics.py

@@ -4,6 +4,7 @@
 # Import useful packages
 import tensorflow as tf
+

 def evaluation(y, prediction):
     '''
@@ -270,7 +271,8 @@ def evaluation(y, prediction):
     Prediction_T4 = T1_T4 + T2_T4 + T3_T4 + T4_T4

     p0 = (T1_T1 + T2_T2 + T3_T3 + T4_T4) / Test_Set_Num
-    pe = (Actual_T1 * Prediction_T1 + Actual_T2 * Prediction_T2 + Actual_T3 * Prediction_T3 + Actual_T4 * Prediction_T4) / \
+    pe = (
+        Actual_T1 * Prediction_T1 + Actual_T2 * Prediction_T2 + Actual_T3 * Prediction_T3 + Actual_T4 * Prediction_T4) / \
         (Test_Set_Num * Test_Set_Num)

     Kappa_Metric = (p0 - pe) / (1 - pe)
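
For context, the reflowed expression computes Cohen's kappa: p0 is the observed agreement and pe the agreement expected by chance, giving Kappa_Metric = (p0 - pe) / (1 - pe). A compact NumPy sketch of the same computation (illustrative only, not part of the diff):

import numpy as np


def cohen_kappa(confusion):
    # confusion: square matrix, rows = actual class, columns = predicted class
    confusion = np.asarray(confusion, dtype=float)
    n = confusion.sum()
    p0 = np.trace(confusion) / n  # observed agreement
    pe = (confusion.sum(axis=1) * confusion.sum(axis=0)).sum() / (n * n)  # chance agreement
    return (p0 - pe) / (1 - pe)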
30 changes: 17 additions & 13 deletions Models/main-BiGRU-with-Attention.py

@@ -8,6 +8,7 @@
 # Hide the Configuration and Warnings
 import os
+
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'

 import random
@@ -42,29 +43,29 @@
 test_labels = tf.squeeze(test_labels).eval(session=sess)

 # Model Hyper-parameters
 n_input = 64        # The input size of signals at each time
 max_time = 64       # The unfolded time slices of the BiGRU Model
 gru_size = 256      # The number of GRUs inside the BiGRU Model
 attention_size = 8  # The number of neurons of the fully-connected layer inside the Attention Mechanism

 n_class = 4       # The number of classification classes
 n_hidden = 64     # The number of hidden units in the first fully-connected layer
 num_epoch = 300   # The number of epochs that the Model runs
 keep_rate = 0.75  # Keep rate of the Dropout

 lr = tf.constant(1e-4, dtype=tf.float32)  # Learning rate
 lr_decay_epoch = 50  # Every (50) epochs, the learning rate decays
 lr_decay = 0.50      # Learning rate decays by (50%)

 batch_size = 1024
 n_batch = train_data.shape[0] // batch_size

 # Initialize Model Parameters (Network Weights and Biases)
 # This Model only uses two fully-connected layers; you can add extra layers yourself
 weights_1 = tf.Variable(tf.truncated_normal([2 * gru_size, n_hidden], stddev=0.01))
 biases_1 = tf.Variable(tf.constant(0.01, shape=[n_hidden]))
 weights_2 = tf.Variable(tf.truncated_normal([n_hidden, n_class], stddev=0.01))
 biases_2 = tf.Variable(tf.constant(0.01, shape=[n_class]))

 # Define Placeholders
 x = tf.placeholder(tf.float32, [None, 64 * 64])
@@ -122,12 +123,15 @@
     # Show Accuracy and Loss on Training and Test Set
     # Here, for the training set, we only show the result of the first 100 samples
     # If you want to show the result on the entire training set, please modify it.
-    train_accuracy, train_loss = sess.run([Global_Average_Accuracy, loss], feed_dict={x: train_data[0:100], y: train_labels[0:100], keep_prob: 1.0})
-    Test_summary, test_accuracy, test_loss = sess.run([merged, Global_Average_Accuracy, loss], feed_dict={x: test_data, y: test_labels, keep_prob: 1.0})
+    train_accuracy, train_loss = sess.run([Global_Average_Accuracy, loss],
+                                          feed_dict={x: train_data[0:100], y: train_labels[0:100], keep_prob: 1.0})
+    Test_summary, test_accuracy, test_loss = sess.run([merged, Global_Average_Accuracy, loss],
+                                                      feed_dict={x: test_data, y: test_labels, keep_prob: 1.0})
     test_writer.add_summary(Test_summary, epoch)

     # Show the Model Capability
-    print("Iter " + str(epoch) + ", Testing Accuracy: " + str(test_accuracy) + ", Training Accuracy: " + str(train_accuracy))
+    print("Iter " + str(epoch) + ", Testing Accuracy: " + str(test_accuracy) + ", Training Accuracy: " + str(
+        train_accuracy))
     print("Iter " + str(epoch) + ", Testing Loss: " + str(test_loss) + ", Training Loss: " + str(train_loss))
     print("Learning rate is ", learning_rate)
     print('\n')
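
The hyper-parameter comments above describe a step schedule: every lr_decay_epoch epochs the learning rate is multiplied by lr_decay. A one-line sketch of that rule (assuming the decay compounds, which matches the "decays by 50%" comment; illustrative, not part of the diff):

def decayed_lr(base_lr=1e-4, epoch=0, decay_epoch=50, decay=0.50):
    # e.g. decayed_lr(epoch=100) == 1e-4 * 0.25
    return base_lr * (decay ** (epoch // decay_epoch))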
43 changes: 28 additions & 15 deletions Models/main-Transformer.py

@@ -11,23 +11,35 @@
 import tensorflow as tf
 from tensorflow import keras
 from tensorflow.keras import layers
+from keras import backend as K
+import os
+
+# Get the available physical GPUs
+# gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
+# for gpu in gpus:
+#     tf.config.experimental.get_memory_growth(gpu, True)
+os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+tf.config.experimental.get_memory_growth = True
+# gpu_options = tf.GPUOptions(allow_growth=True)  # Allocate GPU memory on demand instead of occupying it all
+# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
+# K.set_session(sess)

 # Read Training Data
-train_data = pd.read_csv('training_set.csv', header=None)
+train_data = pd.read_csv('/home/teslav/EEG_DL/training_set.csv', header=None)
 train_data = np.array(train_data).astype('float32')

 # Read Training Labels
-train_labels = pd.read_csv('training_label.csv', header=None)
-train_labels = np.array(train_labels).astype('float32')
+train_labels = pd.read_csv('/home/teslav/EEG_DL/training_label.csv', header=None)
+train_labels = np.array(train_labels).astype('int')
 train_labels = np.squeeze(train_labels)

 # Read Testing Data
-test_data = pd.read_csv('test_set.csv', header=None)
+test_data = pd.read_csv('/home/teslav/EEG_DL/test_set.csv', header=None)
 test_data = np.array(test_data).astype('float32')

 # Read Testing Labels
-test_labels = pd.read_csv('test_label.csv', header=None)
-test_labels = np.array(test_labels).astype('float32')
+test_labels = pd.read_csv('/home/teslav/EEG_DL/test_label.csv', header=None)
+test_labels = np.array(test_labels).astype('int')
 test_labels = np.squeeze(test_labels)
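
One thing to note about the dtype change above: the labels are now integer class indices, while the model below is compiled with keras.losses.CategoricalCrossentropy, which expects one-hot targets. If the CSV files really hold integer indices, either SparseCategoricalCrossentropy or an explicit one-hot conversion is needed; a short sketch of both options (illustrative, not part of the diff):

import numpy as np
import tensorflow as tf

labels = np.array([0, 3, 1, 2])                     # integer class indices, as read above
one_hot = tf.keras.utils.to_categorical(labels, 4)  # shape (4, 4), pairs with CategoricalCrossentropy

loss_sparse = tf.keras.losses.SparseCategoricalCrossentropy()  # takes integer labels directly
loss_onehot = tf.keras.losses.CategoricalCrossentropy()        # takes one-hot labels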


@@ -64,13 +76,13 @@ def call(self, x):
         return out


-maxlen = 3      # Only consider 3 input time points
-embed_dim = 97  # Features of each time point
+maxlen = 64     # Consider 64 input time points
+embed_dim = 64  # Features of each time point
 num_heads = 8   # Number of attention heads
 ff_dim = 64     # Hidden layer size in feed forward network inside transformer

 # Input Time-series
-inputs = layers.Input(shape=(maxlen*embed_dim,))
+inputs = layers.Input(shape=(maxlen * embed_dim,))
 embedding_layer = TokenAndPositionEmbedding(maxlen, embed_dim)
 x = embedding_layer(inputs)

@@ -89,10 +101,11 @@ def call(self, x):

 model = keras.Model(inputs=inputs, outputs=outputs)

-model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4),
-              loss="binary_crossentropy",
-              metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.Recall()])
+model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss=tf.keras.losses.CategoricalCrossentropy(),
+              metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.categorical_accuracy, tf.keras.metrics.Recall()])

+callbacks = [keras.callbacks.TensorBoard(update_freq='epoch')]
 history = model.fit(
-    train_data, train_labels, batch_size=128, epochs=1000, validation_data=(test_data, test_labels)
+    train_data, train_labels, batch_size=128, epochs=1000, validation_data=(test_data, test_labels), callbacks=callbacks
 )
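
A caveat for anyone running this file: tf.config.experimental.get_memory_growth = True (added above) rebinds a function name and does not actually enable on-demand GPU memory allocation. In TF 2.x the supported call is set_memory_growth, as in this sketch (not part of the diff; it must run before any GPU op):

import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)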

4 changes: 2 additions & 2 deletions Preprocess_EEG_Data/For-CNN-based-Models/make_dataset.m

@@ -13,7 +13,7 @@
 Data_points = Time_consider * 160;

 for i = 1:num_channel
-    Dataset = ['Dataset_', num2str(i), '.mat'];
+    Dataset = ['Download_Raw_EEG_Data/20-Subjects/Dataset_', num2str(i), '.mat'];
     Dataset = load(Dataset);
     Dataset = Dataset.Dataset;
     Dataset = reshape(Dataset, num_subject*num_trial, num_data);
@@ -44,7 +44,7 @@
 Dataset = Dataset';

 %% Read and Create Labels
-Labels = load('Labels_1.mat');
+Labels = load('Download_Raw_EEG_Data/20-Subjects/Labels_1.mat');
 Labels = Labels.Labels;
 Labels = reshape(Labels, num_subject*num_trial, 4);
 [row, column] = size(Labels);
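
Note that the new .mat paths are relative to the repository root, so this script now assumes MATLAB's working directory is the top of the EEG-DL checkout.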
1 change: 1 addition & 0 deletions Preprocess_EEG_Data/For-DNN-based-Models/make_dataset.m

@@ -86,6 +86,7 @@
 all_data = ALL_Dataset(:, 1:4096);
 all_labels = ALL_Dataset(:, end);
+
 %%
 xlswrite('training_set.xlsx', training_set);
 xlswrite('test_set.xlsx', test_set);
1 change: 1 addition & 0 deletions Preprocess_EEG_Data/For-RNN-based-Models/make_dataset.m

@@ -86,6 +86,7 @@
 all_data = ALL_Dataset(:, 1:4096);
 all_labels = ALL_Dataset(:, end);
+
 %%
 xlswrite('training_set.xlsx', training_set);
 xlswrite('test_set.xlsx', test_set);