1. Neural network structure
A neural network is built from basic units called "neurons"; each neuron consists of inputs, a computation unit, and an output.
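As a minimal illustration (not from the original notes), a single neuron computes a weighted sum of its inputs plus a bias, then passes the result through an activation function:
import numpy as np

def neuron(x, w, b):
    # computation unit: linear combination of the inputs, then a sigmoid activation
    z = np.dot(w, x) + b
    return 1.0 / (1.0 + np.exp(-z))   # the neuron's output

# Two inputs with illustrative (hypothetical) weights and bias:
print(neuron(np.array([0.5, -1.0]), np.array([0.8, 0.2]), 0.1))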
2. Activation functions
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
x = np.linspace(-10, 10)
y_sigmoid = 1/(1+np.exp(-x))                            # sigmoid
y_tanh = (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))  # tanh
fig = plt.figure()
# plot sigmoid
ax = fig.add_subplot(221)
ax.plot(x, y_sigmoid)
ax.grid()
ax.set_title('(a) Sigmoid')
# plot tanh
ax = fig.add_subplot(222)
ax.plot(x, y_tanh)
ax.grid()
ax.set_title('(b) Tanh')
# plot relu
ax = fig.add_subplot(223)
y_relu = np.array([0 if item < 0 else item for item in x])
ax.plot(x, y_relu)
ax.grid()
ax.set_title('(c) ReLU')
# plot leaky relu
ax = fig.add_subplot(224)
y_leaky_relu = np.array([0.2*item if item < 0 else item for item in x])
ax.plot(x, y_leaky_relu)
ax.grid()
ax.set_title('(d) Leaky ReLU')
plt.tight_layout()  # adjust subplot spacing automatically
3. Comparing the network's layers
4. The neural network model
5. BP neural networks: forward propagation, backpropagation, and iterative parameter updates
- Input layer: the number of neurons is determined by the sample features;
- Hidden layer: the number of neurons is often set to roughly twice the input layer's; the activation function is chosen by the designer; weights and biases start from random initial values;
- Output layer: depends on the task:
  Regression: a single output node, with no activation function on the output;
  Classification:
    Binary: a single output node, with a sigmoid activation;
    Multi-class: k output nodes (one per class), with a softmax activation;
- Training loop: forward propagation to compute the error, then backpropagation to adjust the parameters;
- Do not use too many hidden layers: one is usually enough; more layers increase the computational cost, demand more training samples, and run into the vanishing gradient problem;
- Initialization: random initialization breaks the symmetry between neurons; use small random values close to zero; do not initialize everything to 0 or give all parameters the same value (see the sketch after this list);
- Learning rate: either a fixed rate, or a dynamic one such as α/√t, where t is the iteration count (also shown in the sketch below);
- Interactive demo: A Neural Network Playground
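A minimal sketch of the two points above, small near-zero random initialization and the α/√t learning-rate decay; the 0.01 scale and base rate are illustrative assumptions, not prescribed constants:
import numpy as np

rng = np.random.default_rng(0)
# Small random values near zero break symmetry; never use all zeros
# or identical values. The 0.01 scale is an illustrative choice.
w0 = rng.standard_normal((2, 20)) * 0.01

alpha0 = 0.1                       # illustrative base learning rate
for t in range(1, 6):
    alpha_t = alpha0 / np.sqrt(t)  # dynamic learning rate: alpha / sqrt(t)
    print(f"iter {t}: learning rate = {alpha_t:.4f}")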
6. A Python implementation of a BP neural network
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder

def load_data():
    data = pd.read_table("bpdata.txt", sep=' ', names=['x1', 'x2', 'y'])
    feature = data[['x1', 'x2']].values                # feature matrix
    label_tmp = data[['y']]                            # labels: 0 or 1
    label = np.mat(OneHotEncoder().fit_transform(label_tmp).toarray())
    n_class = label.shape[1]                           # number of classes (2 here)
    return np.mat(feature), label, n_class
def sig(x):
    '''Sigmoid function.
    input:  x (mat/float): the argument, a matrix or any real number
    output: the sigmoid of x (mat/float)
    '''
    return 1.0 / (1 + np.exp(-x))

def partial_sig(x):
    '''Derivative of the sigmoid function.
    input:  x (mat/float): the argument, a matrix or any real number
    output: out (mat/float): sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
    '''
    m, n = np.shape(x)
    out = np.mat(np.zeros((m, n)))
    for i in range(m):
        for j in range(n):
            out[i, j] = sig(x[i, j]) * (1 - sig(x[i, j]))
    return out
def hidden_in(feature, w0, b0):
    '''Input to the hidden layer.
    input:  feature (mat): the features
            w0 (mat): weights between the input and hidden layers
            b0 (mat): biases between the input and hidden layers
    output: hidden_in (mat): the hidden layer's input
    '''
    return feature * w0 + b0

def hidden_out(hidden_in):
    '''Output of the hidden layer.
    input:  hidden_in (mat): the hidden layer's input
    output: hidden_output (mat): the hidden layer's output
    '''
    return sig(hidden_in)  # sigmoid activation
def predict_in(hidden_out, w1, b1):
    '''Input to the output layer.
    input:  hidden_out (mat): the hidden layer's output
            w1 (mat): weights between the hidden and output layers
            b1 (mat): biases between the hidden and output layers
    output: predict_in (mat): the output layer's input
    '''
    return hidden_out * w1 + b1

def predict_out(predict_in):
    '''Output of the output layer.
    input:  predict_in (mat): the output layer's input
    output: result (mat): the output layer's output
    '''
    return sig(predict_in)
def bp_train(feature, label, n_hidden, maxCycle, alpha, n_output):
    '''Train the BP network.
    input:  feature (mat): the features
            label (mat): the labels
            n_hidden (int): number of hidden-layer nodes
            maxCycle (int): maximum number of iterations
            alpha (float): learning rate
            n_output (int): number of output-layer nodes
    output: w0 (mat): weights between the input and hidden layers
            b0 (mat): biases between the input and hidden layers
            w1 (mat): weights between the hidden and output layers
            b1 (mat): biases between the hidden and output layers
    '''
    m, n = np.shape(feature)  # e.g. 400 samples, 2 features
    # 1. Initialization
    w0 = np.mat(np.random.rand(n, n_hidden))         # 2 x 20: two inputs, twenty hidden nodes
    b0 = np.mat(np.random.rand(1, n_hidden))         # 1 x 20
    w1 = np.mat(np.random.rand(n_hidden, n_output))  # 20 x 2
    b1 = np.mat(np.random.rand(1, n_output))         # 1 x 2
    # 2. Training
    i = 0  # iteration counter
    while i <= maxCycle:
        # 2.1 Forward propagation
        # 2.1.1 Input to the hidden layer
        hidden_input = hidden_in(feature, w0, b0)      # m x n_hidden
        # 2.1.2 Output of the hidden layer
        hidden_output = hidden_out(hidden_input)
        # 2.1.3 Input to the output layer
        output_in = predict_in(hidden_output, w1, b1)  # m x n_output
        # 2.1.4 Output of the output layer
        output_out = predict_out(output_in)
        # 2.2 Backward propagation of the error
        # 2.2.1 Residual between the hidden and output layers
        delta_output = -np.multiply((label - output_out), partial_sig(output_in))
        # 2.2.2 Residual between the input and hidden layers
        delta_hidden = np.multiply((delta_output * w1.T), partial_sig(hidden_input))
        # 2.3 Update the weights and biases
        w1 = w1 - alpha * (hidden_output.T * delta_output)
        b1 = b1 - alpha * np.sum(delta_output, axis=0) * (1.0 / m)
        w0 = w0 - alpha * (feature.T * delta_hidden)
        b0 = b0 - alpha * np.sum(delta_hidden, axis=0) * (1.0 / m)
        if i % 100 == 0:
            print("\t-------- iter: ", i, " ,cost: ",
                  (1.0/2) * get_cost(get_predict(feature, w0, w1, b0, b1) - label))
        i += 1
    return w0, w1, b0, b1
def get_cost(cost):
    '''Current value of the loss function.
    input:  cost (mat): difference between predictions and labels
    output: cost_sum / m (double): the loss value
    '''
    m, n = np.shape(cost)
    cost_sum = 0.0
    for i in range(m):
        for j in range(n):
            cost_sum += cost[i, j] * cost[i, j]
    return cost_sum / m
def get_predict(feature, w0, w1, b0, b1):
    '''Final predictions.
    input:  feature (mat): the features
            w0 (mat): weights between the input and hidden layers
            b0 (mat): biases between the input and hidden layers
            w1 (mat): weights between the hidden and output layers
            b1 (mat): biases between the hidden and output layers
    output: the predictions (mat)
    '''
    return predict_out(predict_in(hidden_out(hidden_in(feature, w0, b0)), w1, b1))

def err_rate(label, pre):
    '''Error rate on the training samples.
    input:  label (mat): the training labels
            pre (mat): the predicted labels
    output: rate (float): the error rate
    '''
    m = np.shape(label)[0]
    err = 0.0
    for i in range(m):
        if label[i, 0] != pre[i, 0]:
            err += 1
    rate = err / m
    return rate
if __name__ == "__main__":
    # Load the data
    print("--------- 1.load data ------------")
    feature, label, n_class = load_data()  # two input features, two output classes
    plt.figure(figsize=(8, 4), dpi=100)
    feature1 = feature[:, 0].getA()
    feature2 = feature[:, 1].getA()
    plt.scatter(feature1[0:200], feature2[0:200], color='b')
    plt.scatter(feature1[200:], feature2[200:], color='r')
    plt.title("original_data")
    # Train the network
    print("--------- 2.training ------------")
    w0, w1, b0, b1 = bp_train(feature, label, 20, 1000, 0.1, n_class)
    # Get the final predictions
    print("--------- 3.get prediction ------------")
    result = get_predict(feature, w0, w1, b0, b1)
    print("Training accuracy:", (1 - err_rate(np.argmax(label, axis=1), np.argmax(result, axis=1))))
7. The MLP in sklearn
Classification: sklearn.neural_network.MLPClassifier — scikit-learn 1.1.3 documentation
- hidden_layer_sizes: tuple, length = n_layers - 2, default=(100,): the hidden layers; pass a tuple whose i-th element is the number of neurons in the i-th hidden layer;
- activation: {'identity', 'logistic', 'tanh', 'relu'}, default='relu': the activation function; 'identity' means no activation, suitable for linear problems;
- solver: {'lbfgs', 'sgd', 'adam'}, default='adam': the weight (coefficient) optimization method; whether some other parameters take effect depends on this choice;
- alpha: float, default=0.0001: strength of the L2 regularization;
- batch_size: int, default='auto': mini-batch size for gradient descent;
- learning_rate: {'constant', 'invscaling', 'adaptive'}, default='constant': the learning-rate schedule (used with solver='sgd'): 'constant' keeps a fixed value, 'invscaling' gradually decreases the rate, 'adaptive' keeps it fixed while the loss keeps decreasing and lowers it otherwise;
- learning_rate_init: float, default=0.001: the initial learning rate;
- power_t: float, default=0.5: the exponent for the inverse-scaling schedule;
- max_iter: int, default=200: maximum number of iterations;
- shuffle: bool, default=True: whether to shuffle samples in each iteration;
- tol: float, default=1e-4: tolerance for stopping the iterations;
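A small sketch tying several of these parameters together (the toy data and values are illustrative, not tuned recommendations):
from sklearn.neural_network import MLPClassifier
X = [[0., 0.], [1., 1.], [2., 2.]]
y = [0, 1, 1]
clf = MLPClassifier(hidden_layer_sizes=(50,),    # one hidden layer with 50 neurons
                    activation='relu',
                    solver='sgd',                # the learning_rate schedules apply to sgd
                    learning_rate='invscaling',  # effective rate = learning_rate_init / t**power_t
                    learning_rate_init=0.01,
                    power_t=0.5,
                    max_iter=500,
                    tol=1e-4)
clf.fit(X, y)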
# Binary classification
from sklearn.neural_network import MLPClassifier
X = [[0., 0.], [1., 1.], [2, 2]]
y = [0, 1, 1]
clf = MLPClassifier(hidden_layer_sizes=(5, 2))
clf.fit(X, y)
clf.coefs_                           # weights
clf.intercepts_                      # biases
clf.predict_proba([[2., 2.]])        # class probabilities
clf.predict([[2., 2.], [-1., -2.]])  # predictions
# Multi-class classification
from sklearn.neural_network import MLPClassifier
X = [[0., 0.], [1., 1.], [2, 2]]
y = [0, 1, 2]
clf = MLPClassifier(hidden_layer_sizes=(5, 2))
clf.fit(X, y)
Regression: sklearn.neural_network.MLPRegressor — scikit-learn 1.1.3 documentation
# Regression
from sklearn.neural_network import MLPRegressor
X = [[0., 0.], [1., 1.], [2, 2]]
y = [0, 1, 2]  # a single continuous output
reg = MLPRegressor(hidden_layer_sizes=(5, 2))
reg.fit(X, y)
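Prediction works the same way as for the classifier; the query point below is illustrative:
reg.predict([[1.5, 1.5]])  # continuous output; the output layer has no activation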
Wine classification example:
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
import pandas as pd

data = np.genfromtxt('wine_data.csv', delimiter=',')
x_data = data[:, 1:]  # features
y_data = data[:, 0]   # labels
# Train/test split: stratify samples by label so the class distribution stays consistent
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, stratify=y_data)
pd.Series(y_train).value_counts()  # check the class counts
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)  # standardize the features
x_test = scaler.transform(x_test)
mlp = MLPClassifier(hidden_layer_sizes=(20,))
mlp.fit(x_train, y_train)
mlp.score(x_test, y_test)
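Since classification_report and confusion_matrix are already imported above, a natural follow-up (not in the original notes) is to inspect per-class performance:
predictions = mlp.predict(x_test)
print(confusion_matrix(y_test, predictions))       # rows = true classes, columns = predicted
print(classification_report(y_test, predictions))  # per-class precision, recall, and F1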
# Hyperparameter search
from sklearn.model_selection import GridSearchCV
mlp = MLPClassifier(max_iter=500)
params = {'hidden_layer_sizes': [(10,), (30,), (100,)], 'alpha': [0.000001, 0.00001, 0.0001]}
grid_search = GridSearchCV(mlp, param_grid=params, cv=10, verbose=2, n_jobs=-1)
grid_search.fit(x_train, y_train)
grid_search.best_params_
grid_search.score(x_test, y_test)