Scikit-learn

Scikit-learn (sklearn) is the most popular machine learning library for Python. It provides simple, efficient tools for data mining and data analysis, and is built on top of NumPy, SciPy, and Matplotlib.

Introduction

Scikit-learn Features

Core features of Scikit-learn:

  • Rich set of algorithms: classification, regression, clustering, dimensionality reduction, and more
  • Unified API: consistent fit / predict / transform interface (see the sketch after these lists)
  • Model selection: cross-validation, grid search, metric-based evaluation
  • Preprocessing: standardization, normalization, encoding, and more
  • Feature engineering: feature selection and feature extraction
  • Pipeline mechanism: Pipeline workflows
  • Model persistence: save and load trained models
  • Thorough documentation: detailed docs and examples

Typical use cases:

  • Classification: image recognition, text classification, spam detection
  • Regression: house price prediction, sales forecasting, stock prediction
  • Clustering: customer segmentation, image segmentation, anomaly detection
  • Dimensionality reduction: data visualization, feature extraction, compression
  • Model selection: hyperparameter tuning, model evaluation
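Every estimator shares the same small interface: fit learns from data, predict produces outputs, and transform reshapes features. A minimal sketch of that pattern (using the built-in iris dataset as a stand-in):

from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)

# Transformers: fit learns statistics, transform applies them
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)

# Predictors: fit learns parameters, predict produces labels
clf = LogisticRegression(max_iter=200).fit(X_scaled, y)
print(clf.predict(X_scaled[:5]))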

Installing Scikit-learn

# Create a virtual environment
python -m venv venv

# Activate on Windows
venv\Scripts\activate

# Activate on Linux/Mac
source venv/bin/activate

# Install Scikit-learn
pip install scikit-learn

# Install a specific version
pip install scikit-learn==1.3.0

# Install companion dependencies
pip install numpy scipy matplotlib pandas
pip install joblib # model parallelism and persistence

# Check the installed version
python -c "import sklearn; print(sklearn.__version__)"

Quick Start

Imports and Setup

import numpy as np
import pandas as pd
import sklearn
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report

# Check the version
print(sklearn.__version__)

# Load a sample dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target

# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Standardize the data
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Train a model
model = LogisticRegression(random_state=42)
model.fit(X_train_scaled, y_train)

# Predict
y_pred = model.predict(X_test_scaled)

# Evaluate
print(f'Accuracy: {accuracy_score(y_test, y_pred):.2f}')
print(classification_report(y_test, y_pred))

Built-in Datasets

from sklearn import datasets

# Classification datasets
# Iris dataset (150 samples, 4 features, 3 classes)
iris = datasets.load_iris()

# Handwritten digits dataset (1797 samples, 64 features, 10 classes)
digits = datasets.load_digits()

# Wine dataset (178 samples, 13 features, 3 classes)
wine = datasets.load_wine()

# Breast cancer dataset (569 samples, 30 features, 2 classes)
cancer = datasets.load_breast_cancer()

# Regression datasets
# Note: load_boston was removed in scikit-learn 1.2;
# use the California housing dataset below instead

# Diabetes dataset (442 samples, 10 features)
diabetes = datasets.load_diabetes()

# California housing dataset (20640 samples, 8 features)
california = datasets.fetch_california_housing()

# Generated datasets
# Classification dataset
X, y = datasets.make_classification(
    n_samples=1000,    # number of samples
    n_features=20,     # number of features
    n_informative=15,  # number of informative features
    n_redundant=5,     # number of redundant features
    n_classes=3,       # number of classes
    random_state=42
)

# Regression dataset
X, y = datasets.make_regression(
    n_samples=1000,
    n_features=10,
    n_informative=8,
    noise=0.1,
    random_state=42
)

# Clustering dataset
X, y = datasets.make_blobs(
    n_samples=500,
    n_features=2,
    centers=5,
    cluster_std=1.0,
    random_state=42
)

Data Preprocessing

Standardization and Normalization

from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, MaxAbsScaler, Normalizer
import numpy as np

# Example data
X = np.array([[1, -1, 2],
              [2, 0, 0],
              [0, 1, -1]])

# Standardization (zero mean, unit variance)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Formula: (X - mean) / std

# Min-max scaling (to the 0-1 range)
minmax_scaler = MinMaxScaler()
X_minmax = minmax_scaler.fit_transform(X)
# Formula: (X - min) / (max - min)

# Robust scaling (uses median and quartiles; resistant to outliers)
robust_scaler = RobustScaler()
X_robust = robust_scaler.fit_transform(X)
# Formula: (X - median) / IQR

# Max-absolute-value scaling
maxabs_scaler = MaxAbsScaler()
X_maxabs = maxabs_scaler.fit_transform(X)

# Per-sample normalization (L1, L2)
normalizer = Normalizer(norm='l2') # 'l1', 'l2', 'max'
X_normalized = normalizer.fit_transform(X)
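The formulas in the comments above are easy to verify: after StandardScaler, each column should have (approximately) zero mean and unit standard deviation. A quick check on the same X:

print(X_scaled.mean(axis=0)) # close to [0. 0. 0.]
print(X_scaled.std(axis=0))  # close to [1. 1. 1.]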

Encoding Categorical Variables

import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer

# Label encoding (for target variables)
y = ['cat', 'dog', 'cat', 'bird', 'dog']
le = LabelEncoder()
y_encoded = le.fit_transform(y)
# Result: [1, 2, 1, 0, 2]

# Inspect the class mapping
print(le.classes_) # ['bird', 'cat', 'dog']
print(le.inverse_transform([0, 1, 2])) # ['bird', 'cat', 'dog']

# One-hot encoding (for feature variables)
X = [['Male', 1], ['Female', 3], ['Male', 2]]

# Option 1: OneHotEncoder
# (the sparse parameter was renamed sparse_output in scikit-learn 1.2)
encoder = OneHotEncoder(sparse_output=False)
X_encoded = encoder.fit_transform(X)
# Result: 5 columns (Female, Male, 1, 2, 3)

# Option 2: get_dummies (Pandas)
df = pd.DataFrame(X, columns=['gender', 'number'])
df_dummies = pd.get_dummies(df, columns=['gender'])

# Option 3: ColumnTransformer (for mixed data)
ct = ColumnTransformer([
    ('encoder', OneHotEncoder(), [0]) # one-hot encode column 0
], remainder='passthrough') # leave other columns unchanged
X_transformed = ct.fit_transform(X)

Handling Missing Values

import numpy as np
from sklearn.impute import SimpleImputer, KNNImputer

# Create data with missing values
X = np.array([[1, 2, np.nan],
              [4, np.nan, 6],
              [7, 8, 9]])

# Mean imputation
imputer = SimpleImputer(strategy='mean') # 'mean', 'median', 'most_frequent', 'constant'
X_imputed = imputer.fit_transform(X)

# Median imputation
imputer = SimpleImputer(strategy='median')
X_imputed = imputer.fit_transform(X)

# Mode imputation
imputer = SimpleImputer(strategy='most_frequent')
X_imputed = imputer.fit_transform(X)

# Constant imputation
imputer = SimpleImputer(strategy='constant', fill_value=0)
X_imputed = imputer.fit_transform(X)

# KNN imputation (based on K nearest neighbors)
knn_imputer = KNNImputer(n_neighbors=2)
X_imputed = knn_imputer.fit_transform(X)

# Flag missing values
from sklearn.impute import MissingIndicator
indicator = MissingIndicator()
X_missing = indicator.fit_transform(X)

Feature Selection

import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif, RFE, SelectFromModel
from sklearn.ensemble import RandomForestClassifier

# Remove low-variance features
from sklearn.feature_selection import VarianceThreshold
selector = VarianceThreshold(threshold=0.1) # variance threshold
X_var = selector.fit_transform(X)

# Univariate feature selection (keep the best K features)
selector = SelectKBest(f_classif, k=2) # f_classif (classification), f_regression (regression)
X_new = selector.fit_transform(X, y)

# Inspect the selected features
print(selector.scores_)
print(selector.get_support())

# Recursive feature elimination (RFE)
estimator = RandomForestClassifier(n_estimators=100, random_state=42)
selector = RFE(estimator, n_features_to_select=5)
X_rfe = selector.fit_transform(X, y)

# Inspect the feature ranking
print(selector.ranking_)

# Model-based feature selection
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X, y)

# Feature importances
importances = model.feature_importances_
indices = np.argsort(importances)[::-1]

# Keep features above the median importance
selector = SelectFromModel(model, threshold='median')
X_selected = selector.fit_transform(X, y)

Feature Engineering

import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.decomposition import PCA

# Polynomial features (generate interaction terms)
X = np.arange(6).reshape(3, 2)
poly = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly.fit_transform(X)
# Generates: [x1, x2, x1^2, x1*x2, x2^2]
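# Inspect the generated feature names to confirm the expansion
# (get_feature_names_out is available in scikit-learn >= 1.0)
print(poly.get_feature_names_out(['x1', 'x2']))
# ['x1' 'x2' 'x1^2' 'x1 x2' 'x2^2']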

# PCA dimensionality reduction
pca = PCA(n_components=2) # reduce to 2 dimensions
X_pca = pca.fit_transform(X)

# Inspect the explained variance ratio
print(pca.explained_variance_ratio_)
print(pca.explained_variance_ratio_.cumsum()) # cumulative explained variance

# Choose the number of components that keeps 95% of the variance
pca = PCA(n_components=0.95)
X_pca = pca.fit_transform(X)

# LDA (linear discriminant analysis; supervised, needs class labels y)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis(n_components=2)
X_lda = lda.fit_transform(X, y)

# Feature discretization
from sklearn.preprocessing import KBinsDiscretizer
discretizer = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
X_binned = discretizer.fit_transform(X)

# Binning strategies:
# 'uniform' (equal width), 'quantile' (equal frequency), 'kmeans' (clustering)
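A quick way to see how the binning strategies differ is to inspect the learned bin edges. A small sketch, using made-up skewed data, comparing 'uniform' and 'quantile':

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

# Skewed example data: most values are small, one is large
data = np.array([[1.0], [1.5], [2.0], [2.5], [10.0]])

for strategy in ['uniform', 'quantile']:
    kbd = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy=strategy)
    kbd.fit(data)
    # bin_edges_ holds one array of edges per feature
    print(strategy, kbd.bin_edges_[0])
# 'uniform' splits the range [1, 10] into equal widths;
# 'quantile' puts roughly the same number of samples in each bin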

Supervised Learning

Linear Regression

from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import numpy as np

# Prepare data
X = np.random.randn(100, 5)
y = 2 * X[:, 0] + 3 * X[:, 1] + np.random.randn(100) * 0.1

# Split data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Linear regression
model = LinearRegression()
model.fit(X_train, y_train)

# Predict
y_pred = model.predict(X_test)

# Evaluate
print(f'Coefficients: {model.coef_}')
print(f'Intercept: {model.intercept_}')
print(f'MSE: {mean_squared_error(y_test, y_pred):.2f}')
print(f'R2: {r2_score(y_test, y_pred):.2f}')
print(f'MAE: {mean_absolute_error(y_test, y_pred):.2f}')

# Ridge regression (L2 regularization)
ridge = Ridge(alpha=1.0) # alpha controls regularization strength
ridge.fit(X_train, y_train)

# Lasso regression (L1 regularization; can perform feature selection)
lasso = Lasso(alpha=1.0)
lasso.fit(X_train, y_train)

# ElasticNet (L1 + L2 regularization)
elastic = ElasticNet(alpha=1.0, l1_ratio=0.5) # l1_ratio balances L1 vs L2
elastic.fit(X_train, y_train)

Logistic Regression

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.datasets import load_breast_cancer

# Load data
data = load_breast_cancer()
X, y = data.data, data.target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Logistic regression
model = LogisticRegression(
    penalty='l2',    # regularization type: 'l1', 'l2', 'elasticnet', None
    C=1.0,           # inverse regularization strength (smaller = stronger regularization)
    solver='lbfgs',  # optimizer: 'lbfgs', 'liblinear', 'saga'
    max_iter=100,    # maximum number of iterations
    random_state=42
)
model.fit(X_train_scaled, y_train)

# Predict
y_pred = model.predict(X_test_scaled)
y_pred_proba = model.predict_proba(X_test_scaled) # predicted probabilities

# Evaluate
print(f'Accuracy: {accuracy_score(y_test, y_pred):.2f}')
print('Confusion matrix:')
print(confusion_matrix(y_test, y_pred))
print('Classification report:')
print(classification_report(y_test, y_pred, target_names=data.target_names))

# Multiclass logistic regression
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target

# 'ovr' (one-vs-rest) or 'multinomial'; note that multi_class is
# deprecated since scikit-learn 1.5, where multinomial is the default
model = LogisticRegression(multi_class='ovr', max_iter=200)
model.fit(X, y)

Decision Trees

from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn import tree

# Classification tree
clf = DecisionTreeClassifier(
    criterion='gini',     # 'gini' (Gini impurity) or 'entropy' (information gain)
    max_depth=5,          # maximum depth
    min_samples_split=2,  # minimum samples required to split a node
    min_samples_leaf=1,   # minimum samples required at a leaf
    max_features=None,    # maximum number of features considered per split
    random_state=42
)
clf.fit(X_train, y_train)

# Regression tree
reg = DecisionTreeRegressor(
    criterion='squared_error', # 'squared_error' or 'absolute_error'
    max_depth=5,
    random_state=42
)
reg.fit(X_train, y_train)

# Visualize the tree
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 10))
tree.plot_tree(clf, filled=True, feature_names=data.feature_names)
plt.show()

# Export as text
print(tree.export_text(clf, feature_names=data.feature_names))

# Export for Graphviz
dot_data = tree.export_graphviz(clf,
                                feature_names=data.feature_names,
                                class_names=data.target_names,
                                filled=True)

Random Forest

from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

# Random forest classifier
rf_clf = RandomForestClassifier(
    n_estimators=100,     # number of trees
    criterion='gini',
    max_depth=5,
    min_samples_split=2,
    min_samples_leaf=1,
    max_features='sqrt',  # 'sqrt' (square root of n features), 'log2', None
    bootstrap=True,       # use bootstrap sampling
    oob_score=True,       # evaluate on out-of-bag samples
    n_jobs=-1,            # parallelism (-1 uses all CPUs)
    random_state=42
)
rf_clf.fit(X_train, y_train)

# Feature importances
importances = rf_clf.feature_importances_
indices = np.argsort(importances)[::-1]

# Out-of-bag score
print(f'OOB Score: {rf_clf.oob_score_:.2f}')

# Random forest regressor
rf_reg = RandomForestRegressor(
    n_estimators=100,
    criterion='squared_error',
    max_depth=5,
    random_state=42
)
rf_reg.fit(X_train, y_train)

Support Vector Machines

from sklearn.svm import SVC, SVR, LinearSVC

# Support vector classification
svm = SVC(
    kernel='rbf',      # 'linear', 'poly', 'rbf', 'sigmoid'
    C=1.0,             # regularization parameter
    gamma='scale',     # kernel coefficient: 'scale', 'auto', or a number
    degree=3,          # polynomial degree (poly kernel only)
    probability=True,  # enable probability estimates
    random_state=42
)
svm.fit(X_train_scaled, y_train)

# Predict
y_pred = svm.predict(X_test_scaled)
y_pred_proba = svm.predict_proba(X_test_scaled)

# Linear SVM (faster; suits large datasets)
linear_svm = LinearSVC(C=1.0, random_state=42)
linear_svm.fit(X_train_scaled, y_train)

# Support vector regression
svr = SVR(kernel='rbf', C=1.0, gamma='scale')
svr.fit(X_train_scaled, y_train)

K-Nearest Neighbors

from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor

# KNN classifier
knn = KNeighborsClassifier(
    n_neighbors=5,      # K
    weights='uniform',  # 'uniform' (equal weights) or 'distance' (distance-weighted)
    algorithm='auto',   # 'auto', 'ball_tree', 'kd_tree', 'brute'
    leaf_size=30,       # leaf size for tree-based algorithms
    p=2,                # Minkowski power: 1 (Manhattan), 2 (Euclidean)
    metric='minkowski'  # distance metric
)
knn.fit(X_train_scaled, y_train)

# Find the best K
from sklearn.model_selection import cross_val_score
k_range = range(1, 31)
k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(knn, X_train_scaled, y_train, cv=5, scoring='accuracy')
    k_scores.append(scores.mean())

best_k = k_range[np.argmax(k_scores)]
print(f'Best K: {best_k}')

# KNN regressor
knn_reg = KNeighborsRegressor(n_neighbors=5, weights='distance')
knn_reg.fit(X_train_scaled, y_train)

Naive Bayes

from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB

# Gaussian naive Bayes (assumes normally distributed features)
gnb = GaussianNB()
gnb.fit(X_train, y_train)

# Multinomial naive Bayes (for discrete count data)
mnb = MultinomialNB(alpha=1.0) # alpha is the Laplace smoothing parameter
mnb.fit(X_train, y_train)

# Bernoulli naive Bayes (for binary features)
bnb = BernoulliNB(alpha=1.0)
bnb.fit(X_train, y_train)

# Class prior probabilities and counts
print(gnb.class_prior_)
print(gnb.class_count_)

Unsupervised Learning

K-means Clustering

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt

# K-means clustering
kmeans = KMeans(
    n_clusters=3,      # number of clusters
    init='k-means++',  # initialization: 'k-means++' or 'random'
    n_init=10,         # number of runs (the best one is kept)
    max_iter=300,      # maximum iterations
    random_state=42
)
kmeans.fit(X)

# Cluster labels and centers
labels = kmeans.labels_
centers = kmeans.cluster_centers_

# Predict for new samples (X_new: previously unseen data)
new_labels = kmeans.predict(X_new)

# Inertia (within-cluster sum of squared errors)
inertia = kmeans.inertia_

# Silhouette coefficient (measures clustering quality)
score = silhouette_score(X, labels)

# Choose the best K (elbow method)
inertias = []
K = range(1, 11)
for k in K:
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(X)
    inertias.append(kmeans.inertia_)

plt.plot(K, inertias, 'bo-')
plt.xlabel('K')
plt.ylabel('Inertia')
plt.show()

Hierarchical Clustering

from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram, linkage

# Agglomerative clustering
# (the affinity parameter was renamed metric in scikit-learn 1.2)
agg = AgglomerativeClustering(
    n_clusters=3,        # number of clusters
    metric='euclidean',  # distance metric
    linkage='ward'       # linkage criterion: 'ward', 'complete', 'average', 'single'
)
labels = agg.fit_predict(X)

# Plot the dendrogram
linkage_matrix = linkage(X, method='ward')
plt.figure(figsize=(10, 6))
dendrogram(linkage_matrix)
plt.show()

DBSCAN

from sklearn.cluster import DBSCAN

# DBSCAN clustering
dbscan = DBSCAN(
    eps=0.5,             # neighborhood radius
    min_samples=5,       # minimum samples per core point
    metric='euclidean',  # distance metric
    algorithm='auto'     # 'auto', 'ball_tree', 'kd_tree', 'brute'
)
labels = dbscan.fit_predict(X)

# Identify noise points (labeled -1)
n_noise = list(labels).count(-1)
print(f'Number of noise points: {n_noise}')

# Number of clusters (excluding noise)
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print(f'Number of clusters: {n_clusters}')

PCA Dimensionality Reduction

from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# PCA
pca = PCA(n_components=2) # reduce to 2 dimensions
X_pca = pca.fit_transform(X)

# Inspect the explained variance ratio
print(f'Per-component explained variance ratio: {pca.explained_variance_ratio_}')
print(f'Cumulative explained variance ratio: {pca.explained_variance_ratio_.cumsum()}')

# Choose the number of components that keeps 95% of the variance
pca = PCA(n_components=0.95)
X_pca = pca.fit_transform(X)
print(f'Number of components selected: {pca.n_components_}')

# Plot the explained variance
pca_full = PCA().fit(X)
plt.figure(figsize=(10, 6))
plt.plot(np.cumsum(pca_full.explained_variance_ratio_))
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance ratio')
plt.axhline(y=0.95, color='r', linestyle='--')
plt.show()

# Visualize the reduced data
plt.figure(figsize=(10, 6))
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis')
plt.xlabel('Principal component 1')
plt.ylabel('Principal component 2')
plt.colorbar()
plt.show()

Model Evaluation

Train/Test Split

from sklearn.model_selection import train_test_split

# Basic split
X_train, X_test, y_train, y_test = train_test_split(
    X, y,
    test_size=0.2,    # test set proportion
    train_size=None,  # train set proportion (defaults to the complement of test_size)
    random_state=42,  # random seed
    shuffle=True,     # shuffle before splitting
    stratify=y        # stratified sampling (preserves class proportions)
)

# Three-way split (train, validation, test)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=42)
# Final proportions: 60% train, 20% validation, 20% test

Cross-Validation

from sklearn.model_selection import cross_val_score, cross_validate, KFold, StratifiedKFold

# K-fold cross-validation
model = LogisticRegression(random_state=42)
scores = cross_val_score(
    model, X, y,
    cv=5,                # number of folds
    scoring='accuracy',  # evaluation metric
    n_jobs=-1            # parallelism
)
print(f'Accuracy: {scores.mean():.2f} (+/- {scores.std():.2f})')

# Multi-metric cross-validation
scoring = ['accuracy', 'precision', 'recall', 'f1']
results = cross_validate(model, X, y, cv=5, scoring=scoring, return_train_score=True)
print(f'Test accuracy: {results["test_accuracy"].mean():.2f}')

# KFold
kf = KFold(n_splits=5, shuffle=True, random_state=42)
for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]

# StratifiedKFold (stratified sampling)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for train_index, test_index in skf.split(X, y):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]

# Leave-One-Out
from sklearn.model_selection import LeaveOneOut
loo = LeaveOneOut()
scores = cross_val_score(model, X, y, cv=loo)

Evaluation Metrics

from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, f1_score,
    confusion_matrix, classification_report,
    roc_curve, auc, roc_auc_score,
    mean_squared_error, mean_absolute_error, r2_score
)

# Classification metrics
y_true = [0, 1, 0, 1, 0, 1]
y_pred = [0, 1, 0, 0, 0, 1]

# Accuracy
acc = accuracy_score(y_true, y_pred)

# Precision
precision = precision_score(y_true, y_pred)

# Recall
recall = recall_score(y_true, y_pred)

# F1 score
f1 = f1_score(y_true, y_pred)

# Confusion matrix (rows = true class, columns = predicted class)
cm = confusion_matrix(y_true, y_pred)
print(cm)
# [[3 0]
#  [1 2]]
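# Check by hand from the matrix: for the positive class, TP=2, FP=0, FN=1
# precision = TP / (TP + FP) = 2/2 = 1.00
# recall    = TP / (TP + FN) = 2/3 ≈ 0.67
# f1        = 2 * P * R / (P + R) = 0.80
print(precision, recall, f1)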

# Classification report
report = classification_report(y_true, y_pred)
print(report)

# ROC curve and AUC
y_pred_proba = model.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)
roc_auc = auc(fpr, tpr)

plt.plot(fpr, tpr, label=f'ROC curve (AUC = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()

# Regression metrics
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]

# Mean squared error
mse = mean_squared_error(y_true, y_pred)

# Root mean squared error
rmse = np.sqrt(mse)

# Mean absolute error
mae = mean_absolute_error(y_true, y_pred)

# R2 score
r2 = r2_score(y_true, y_pred)
Learning Curves

from sklearn.model_selection import learning_curve

# Learning curve
train_sizes, train_scores, val_scores = learning_curve(
    estimator=model,
    X=X,
    y=y,
    train_sizes=np.linspace(0.1, 1.0, 10), # fractions of the training set
    cv=5,
    scoring='accuracy',
    n_jobs=-1
)

# Compute means and standard deviations
train_mean = train_scores.mean(axis=1)
train_std = train_scores.std(axis=1)
val_mean = val_scores.mean(axis=1)
val_std = val_scores.std(axis=1)

# Plot the learning curve
plt.figure(figsize=(10, 6))
plt.plot(train_sizes, train_mean, label='Training score')
plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.1)
plt.plot(train_sizes, val_mean, label='Validation score')
plt.fill_between(train_sizes, val_mean - val_std, val_mean + val_std, alpha=0.1)
plt.xlabel('Training Set Size')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

Model Tuning

Grid Search

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Define the parameter grid
param_grid = {
    'C': [0.1, 1, 10, 100],
    'gamma': [1, 0.1, 0.01, 0.001],
    'kernel': ['rbf', 'linear']
}

# Grid search
grid_search = GridSearchCV(
    estimator=SVC(random_state=42),
    param_grid=param_grid,
    cv=5,                # cross-validation folds
    scoring='accuracy',  # evaluation metric
    n_jobs=-1,           # parallelism
    refit=True,          # refit on the full training set with the best parameters
    verbose=1            # progress output
)
grid_search.fit(X_train_scaled, y_train)

# Best parameters and model
print(f'Best parameters: {grid_search.best_params_}')
print(f'Best score: {grid_search.best_score_:.2f}')
best_model = grid_search.best_estimator_

# All results
results = pd.DataFrame(grid_search.cv_results_)
print(results[['param_C', 'param_gamma', 'param_kernel', 'mean_test_score']])

Random Search

from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform

# Define the parameter distributions
param_distributions = {
    'C': uniform(loc=0, scale=10),  # uniform distribution
    'gamma': uniform(loc=0, scale=0.1),
    'kernel': ['rbf', 'linear']
}

# Random search
random_search = RandomizedSearchCV(
    estimator=SVC(random_state=42),
    param_distributions=param_distributions,
    n_iter=50,  # number of sampled candidates
    cv=5,
    scoring='accuracy',
    n_jobs=-1,
    random_state=42
)
random_search.fit(X_train_scaled, y_train)

print(f'Best parameters: {random_search.best_params_}')
print(f'Best score: {random_search.best_score_:.2f}')

Bayesian Optimization

# Requires scikit-optimize:
# pip install scikit-optimize

from skopt import BayesSearchCV
from sklearn.svm import SVC

# Define the search space
param_space = {
    'C': (0.1, 100.0, 'log-uniform'),  # log-uniform distribution
    'gamma': (0.001, 1.0, 'log-uniform'),
    'kernel': ['rbf', 'linear']
}

# Bayesian optimization
opt = BayesSearchCV(
    estimator=SVC(random_state=42),
    search_spaces=param_space,
    n_iter=50,  # number of iterations
    cv=5,
    n_jobs=-1,
    random_state=42
)
opt.fit(X_train_scaled, y_train)

print(f'Best parameters: {opt.best_params_}')
print(f'Best score: {opt.best_score_:.2f}')

Pipelines

Pipeline

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC

# Build a pipeline
pipe = Pipeline([
    ('scaler', StandardScaler()),  # standardization
    ('pca', PCA(n_components=2)),  # PCA dimensionality reduction
    ('svm', SVC(random_state=42))  # SVM classifier
])

# Train
pipe.fit(X_train, y_train)

# Predict
y_pred = pipe.predict(X_test)

# Inspect the pipeline steps
print(pipe.steps)

# Access individual steps
scaler = pipe.named_steps['scaler']
svm_model = pipe.named_steps['svm']

# Grid search over pipeline parameters (step name + '__' + parameter)
param_grid = {
    'pca__n_components': [2, 3, 4],
    'svm__C': [0.1, 1, 10],
    'svm__gamma': ['scale', 'auto']
}
grid_search = GridSearchCV(pipe, param_grid, cv=5)
grid_search.fit(X_train, y_train)

FeatureUnion

from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest

# Feature union (applies transformers in parallel and concatenates the results)
combined_features = FeatureUnion([
    ('pca', PCA(n_components=2)),
    ('kbest', SelectKBest(k=3))
])

# Use inside a pipeline
pipe = Pipeline([
    ('features', combined_features),
    ('svm', SVC(random_state=42))
])
pipe.fit(X_train, y_train)

Model Persistence

import joblib
from sklearn.linear_model import LogisticRegression

# Train a model
model = LogisticRegression(random_state=42)
model.fit(X_train, y_train)

# Save the model
joblib.dump(model, 'model.joblib')

# Load the model
loaded_model = joblib.load('model.joblib')

# Predict with the loaded model
y_pred = loaded_model.predict(X_test)

# Save a pipeline
pipe = Pipeline([('scaler', StandardScaler()), ('model', model)])
joblib.dump(pipe, 'pipeline.joblib')

# Using pickle (not recommended for large models)
import pickle
with open('model.pkl', 'wb') as f:
    pickle.dump(model, f)

with open('model.pkl', 'rb') as f:
    loaded_model = pickle.load(f)

Worked Examples

Classification Task

import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report

# Load data
data = load_breast_cancer()
X, y = data.data, data.target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Build a pipeline
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('rf', RandomForestClassifier(random_state=42))
])

# Grid search
param_grid = {
    'rf__n_estimators': [100, 200],
    'rf__max_depth': [5, 10, None],
    'rf__min_samples_split': [2, 5]
}
grid_search = GridSearchCV(pipe, param_grid, cv=5, n_jobs=-1)
grid_search.fit(X_train, y_train)

# Evaluate
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred, target_names=data.target_names))

Regression Task

import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score

# Load data
housing = fetch_california_housing()
X, y = housing.data, housing.target

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train a model
model = GradientBoostingRegressor(
    n_estimators=100,
    learning_rate=0.1,
    max_depth=5,
    random_state=42
)
model.fit(X_train, y_train)

# Predict and evaluate
y_pred = model.predict(X_test)
print(f'RMSE: {np.sqrt(mean_squared_error(y_test, y_pred)):.2f}')
print(f'R2: {r2_score(y_test, y_pred):.2f}')

Clustering Task

from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt

# Generate data
X, y_true = make_blobs(n_samples=500, centers=5, cluster_std=0.60, random_state=42)

# K-means clustering
kmeans = KMeans(n_clusters=5, random_state=42)
labels = kmeans.fit_predict(X)

# Visualize
plt.figure(figsize=(10, 6))
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.5, marker='X')
plt.title('K-means clustering result')
plt.show()

# Evaluate
score = silhouette_score(X, labels)
print(f'Silhouette score: {score:.2f}')

Best Practices

Data Preparation Workflow

# Recommended data preparation workflow
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# 1. Split first, then preprocess (avoids data leakage)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 2. Fit the scaler on the training set only
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test) # note: reuse the same fitted scaler

# 3. Train the model
model.fit(X_train_scaled, y_train)

# 4. Evaluate
score = model.score(X_test_scaled, y_test)

Preventing Overfitting

# 1. Cross-validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, X, y, cv=5)

# 2. Regularization
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(C=0.1) # smaller C = stronger regularization

# 3. Early stopping
# (GradientBoostingClassifier has no early_stopping parameter;
# setting n_iter_no_change is what enables early stopping)
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(
    n_estimators=1000,
    validation_fraction=0.2,
    n_iter_no_change=10  # stop after 10 iterations without improvement
)

# 4. More data
from imblearn.over_sampling import SMOTE # handles class imbalance
smote = SMOTE()
X_resampled, y_resampled = smote.fit_resample(X, y)

Handling Imbalanced Data

from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.utils.class_weight import compute_class_weight

# 1. Class weights
class_weights = compute_class_weight('balanced', classes=np.unique(y), y=y)
model = LogisticRegression(class_weight='balanced')

# 2. Oversampling
smote = SMOTE(random_state=42)
X_resampled, y_resampled = smote.fit_resample(X, y)

# 3. Undersampling
rus = RandomUnderSampler(random_state=42)
X_resampled, y_resampled = rus.fit_resample(X, y)

# 4. Combined sampling
from imblearn.combine import SMOTETomek
smt = SMOTETomek(random_state=42)
X_resampled, y_resampled = smt.fit_resample(X, y)

Common Pitfalls

# 1. Data leakage
# Wrong: standardize first, then split the data
# Right: split the data first, then standardize

# 2. Forgetting to set random_state
# Wrong: model = LogisticRegression()
# Right: model = LogisticRegression(random_state=42)

# 3. Skipping preprocessing
# Algorithms such as SVM and KNN need standardized data

# 4. Overfitting
# Use cross-validation, regularization, early stopping, etc.

# 5. Choosing the wrong metric
# For imbalanced data, use F1 or AUC rather than accuracy
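The cleanest guard against pitfall 1 is to put preprocessing inside a Pipeline, so that during cross-validation the scaler is re-fit on each training fold and the test fold never leaks into it. A minimal sketch (assuming a feature matrix X and labels y are already loaded):

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# The scaler is fitted inside every CV fold, never on the held-out fold
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('clf', LogisticRegression(random_state=42))
])
scores = cross_val_score(pipe, X, y, cv=5)
print(scores.mean())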

Summary

Scikit-learn is the standard machine learning library for Python:

Core Concepts

  • Unified API: consistent fit / predict / transform interface
  • Supervised learning: classification and regression algorithms
  • Unsupervised learning: clustering and dimensionality reduction algorithms
  • Model evaluation: cross-validation and a wide range of metrics
  • Model tuning: grid search, random search, Bayesian optimization

Key Capabilities

  1. Data preprocessing: standardization, normalization, encoding, missing value imputation
  2. Supervised learning: linear models, tree models, SVM, KNN, naive Bayes
  3. Unsupervised learning: K-means, hierarchical clustering, DBSCAN, PCA
  4. Model evaluation: data splitting, cross-validation, evaluation metrics
  5. Model tuning: grid search, random search, Bayesian optimization
  6. Pipelines: Pipeline and FeatureUnion
  7. Model persistence: saving and loading models with joblib

Best Practices

  1. Split the data before preprocessing
  2. Evaluate model performance with cross-validation
  3. Tune hyperparameters with grid search
  4. Handle class imbalance
  5. Simplify workflows with pipelines
  6. Save trained models

Scikit-learn is the go-to library for getting started with and practicing machine learning; mastering it is the foundation of any machine learning project.