# -*- coding: utf-8 -*-
"""Paper code
Sentiment analysis of movie reviews based on a Transformer model
- Environment: tensorflow==2.7.0 (GPU)
  numpy==1.19.5
  matplotlib==3.2.2
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
# tf.test.is_gpu_available() is deprecated; list the visible GPUs instead
print(tf.config.list_physical_devices('GPU'))
"""# 一、数据集"""
# Keep only the 4000 most frequent words as the vocabulary
vocab_size = 4000
# Use at most 200 tokens per IMDB review (sequences are padded or truncated to this length)
maxlen = 200
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
# Pad (or truncate) every training and validation sequence to the fixed length
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)
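# Illustration (not in the original script): decode one encoded review back to
# words. Dataset indices are offset by 3 relative to get_word_index(), because
# 0, 1, and 2 are reserved for padding, start, and unknown tokens.
index_to_word = {v + 3: k for k, v in keras.datasets.imdb.get_word_index().items()}
print(' '.join(index_to_word.get(i, '?') for i in x_train[0] if i >= 3))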
"""# 二、模型"""
# Transformer encoder block
class TransformerBlock(layers.Layer):
    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
        super(TransformerBlock, self).__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.ffn = keras.Sequential(
            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training=None):
        # Self-attention: queries, keys, and values all come from the same input
        attn_output = self.att(inputs, inputs)
        attn_output = self.dropout1(attn_output, training=training)
        # Residual connection followed by layer normalization
        out1 = self.layernorm1(inputs + attn_output)
        # Position-wise feed-forward network
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        return self.layernorm2(out1 + ffn_output)
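# Quick shape check (illustration, not in the original script): the block maps
# a batch of embedded sequences to an output of identical shape. demo_block is
# a throwaway name; the dimensions match the hyperparameters chosen below.
demo_block = TransformerBlock(embed_dim=32, num_heads=2, ff_dim=32)
print(demo_block(tf.random.normal((2, 200, 32)), training=False).shape)  # (2, 200, 32)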
# Token embedding plus learned position embedding
class TokenAndPositionEmbedding(layers.Layer):
    def __init__(self, maxlen, vocab_size, embed_dim):
        super(TokenAndPositionEmbedding, self).__init__()
        self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)

    def call(self, x):
        # Embed positions 0..seq_len-1 and add them to the token embeddings
        maxlen = tf.shape(x)[-1]
        positions = tf.range(start=0, limit=maxlen, delta=1)
        positions = self.pos_emb(positions)
        x = self.token_emb(x)
        return x + positions
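# Similar check (illustration only): the embedding layer turns integer token
# ids of shape (batch, maxlen) into vectors of shape (batch, maxlen, embed_dim).
demo_emb = TokenAndPositionEmbedding(maxlen=200, vocab_size=4000, embed_dim=32)
print(demo_emb(tf.zeros((2, 200), dtype=tf.int32)).shape)  # (2, 200, 32)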
# Embedding dimension for each token
embed_dim = 32
# Number of attention heads
num_heads = 2
# Number of hidden units in the feed-forward network inside the Transformer block
ff_dim = 32
# Assemble the full model: embedding -> Transformer block -> pooling -> classifier
model = keras.Sequential([
    TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim),
    TransformerBlock(embed_dim, num_heads, ff_dim),
    layers.GlobalAveragePooling1D(),
    layers.Dropout(0.3),
    layers.Dense(20, activation="sigmoid"),
    layers.Dropout(0.3),
    layers.Dense(2, activation="softmax")
])
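# Optional check (illustration, not in the original script): one forward pass
# on two padded sequences should return two rows of class probabilities.
print(model(x_train[:2]).shape)  # expected: (2, 2)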
checkpoint_path = './checkpoint/min.{epoch:02d}-{val_loss:.2f}.ckpt'
def scheduler(epoch, lr):
    # Hold the learning rate steady for the first 10 epochs, then decay it exponentially
    if epoch < 10:
        return lr
    else:
        return lr * tf.math.exp(-0.1)
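# Trace of the schedule (illustration, not in the original script). Note that
# fit() below runs epoch indices 0-9, all below the threshold, so with
# epochs=10 the decay branch is never reached; it matters only for longer runs.
lr_demo = 0.001  # assumed starting rate (the Keras default for Adam)
for ep in range(15):
    lr_demo = scheduler(ep, lr_demo)
print(float(lr_demo))  # rate after 15 epochs, decayed from epoch 10 onward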
callbacks = [
    # Save the model after every epoch
    tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,  # where checkpoints are written
                                       save_weights_only=True,    # save only the weights and biases
                                       verbose=1,                 # print a message when a checkpoint is saved
                                       save_freq='epoch'),        # save once per epoch ('period' is deprecated)
    # Early stopping to limit overfitting: halt once val_loss has not improved
    # for 3 consecutive epochs
    tf.keras.callbacks.EarlyStopping(monitor='val_loss',  # quantity to monitor
                                     patience=3),         # epochs to wait for improvement
    # Adjust the learning rate according to the schedule defined above
    tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=0)]
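# Sketch (illustration, not in the original script): how to restore the most
# recent checkpoint written by ModelCheckpoint, e.g. to resume after a restart.
latest = tf.train.latest_checkpoint('./checkpoint')
if latest is not None:  # None until at least one checkpoint has been written
    model.load_weights(latest)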
"""# 三、模型训练与评估"""
# Compile and train the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(x_train, y_train, batch_size=128, epochs=10, validation_split=0.2, callbacks=callbacks)
model.summary()
import matplotlib.pyplot as plt

def show_train_history(train_history, train_metrics, val_metrics):
    plt.plot(train_history[train_metrics])
    plt.plot(train_history[val_metrics])
    plt.title(f'Train History {train_metrics}')
    plt.ylabel(train_metrics)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
show_train_history(history.history,'loss','val_loss')
show_train_history(history.history,'accuracy','val_accuracy')
"""# 四、预测"""
model.evaluate(x_val, y_val, verbose=1)  # verbose: 0 = silent, 1 = progress bar, 2 = one line per epoch
pre = model.predict(x_val)
print(pre[0])  # predicted class probabilities for the first validation sample
import numpy as np

state = {1: 'pos', 0: 'neg'}
print(state[np.argmax(pre[0])])  # predicted sentiment for the first sample
print(state[y_val[0]])  # ground-truth label: 1 means positive
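# Sketch (assumption, not in the original script): score a raw review string.
# encode_review is a hypothetical helper; it uses the same offset-by-3 index
# convention as the decoding example in the dataset section.
word_index = keras.datasets.imdb.get_word_index()

def encode_review(text):
    ids = [1]  # 1 = start token
    for w in text.lower().split():
        idx = word_index.get(w, -1) + 3  # missing words map to 2 (unknown)
        ids.append(idx if 2 < idx < vocab_size else 2)  # clip out-of-vocab to 2
    return keras.preprocessing.sequence.pad_sequences([ids], maxlen=maxlen)

print(state[np.argmax(model.predict(encode_review("a wonderful and moving film"))[0])])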