import numpy as np
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.datasets import load_iris
def load_data(filename):
    """Load the iris dataset, standardize it, and return a train/test split.

    NOTE(review): *filename* is accepted for interface compatibility but is
    currently unused -- the built-in sklearn iris dataset is loaded instead.
    Confirm whether CSV loading was intended here.

    Returns:
        (x_train, x_test, y_train, y_test): standardized feature arrays and
        the corresponding integer class labels (30% held out for testing).
    """
    iris = load_iris()
    print(iris.data[0:5])
    # Split FIRST, then fit the scaler on the training portion only.
    # Fitting StandardScaler on the full dataset before splitting leaks
    # test-set statistics (mean/std) into training (data leakage).
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.3)
    # Standardization (like normalization) keeps any single feature from
    # dominating due to its scale.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    print(scaler.mean_)  # per-column mean of the TRAINING features
    # Apply the training-set statistics to the test set (no re-fitting).
    x_test = scaler.transform(x_test)
    print(x_train[0:5])
    return x_train, x_test, y_train, y_test
def svm_test(x_train, x_test, y_train, y_test):
    """Grid-search an SVM over kernel/C/gamma and report test accuracy.

    Args:
        x_train, x_test: standardized feature arrays.
        y_train, y_test: integer class labels.

    Side effects: prints progress and the final test-set accuracy.
    """
    # RBF (Gaussian) kernel as the base estimator; class_weight='balanced'
    # reweights classes inversely to their frequency in y.
    svc = SVC(kernel='rbf', class_weight='balanced')
    c_range = np.logspace(-5, 15, 11, base=2)     # C in 2^-5 .. 2^15
    gamma_range = np.logspace(-9, 3, 13, base=2)  # gamma in 2^-9 .. 2^3
    # One grid dict per kernel: the linear kernel ignores gamma, so
    # searching gamma for it would multiply its fit count 13x with
    # identical results (286 candidates -> 154).
    param_grid = [
        {'kernel': ['rbf'], 'C': c_range, 'gamma': gamma_range},
        {'kernel': ['linear'], 'C': c_range},
    ]
    # 3-fold cross-validation; n_jobs=-1 uses all CPU cores.
    grid = GridSearchCV(svc, param_grid, cv=3, n_jobs=-1)
    print('开始训练...')
    grid.fit(x_train, y_train)
    # Accuracy of the best refit estimator on the held-out test set.
    score = grid.score(x_test, y_test)
    print('精度为%s' % score)
if __name__ == '__main__':
    # Entry point: build the standardized train/test split, then run the
    # SVM grid search on it.
    splits = load_data('example.csv')
    svm_test(*splits)