# Day 32: credit-default prediction — data preprocessing and a simple
# feed-forward neural network.
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
import matplotlib.pyplot as plt
from tqdm import tqdm # progress-bar display for training loops
import warnings
warnings.filterwarnings("ignore") # suppress warning output
# Select the first GPU if available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")
import pandas as pd
# Load the credit-default dataset
# NOTE(review): hard-coded absolute Windows path — consider making this configurable
data = pd.read_csv(r"E:\PythonStudy\python60-days-challenge-master\data.csv")
import numpy as np # efficient numerical array operations
# Identify string (object-dtype) columns — these need encoding before modeling
discrete_features = data.select_dtypes(include=['object']).columns.tolist()
# Home Ownership: ordinal label encoding
home_ownership_mapping = {
    'Own Home': 1,
    'Rent': 2,
    'Have Mortgage': 3,
    'Home Mortgage': 4
}
data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)
# Years in current job: ordinal label encoding
years_in_job_mapping = {
    '< 1 year': 1,
    '1 year': 2,
    '2 years': 3,
    '3 years': 4,
    '4 years': 5,
    '5 years': 6,
    '6 years': 7,
    '7 years': 8,
    '8 years': 9,
    '9 years': 10,
    '10+ years': 11
}
data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)
# Purpose: one-hot encoding (the resulting bool dummy columns are cast to int below)
data = pd.get_dummies(data, columns=['Purpose'])
data2 = pd.read_csv(r"E:\PythonStudy\python60-days-challenge-master\data.csv") # re-read raw data only to diff column names
list_final = [] # names of the columns newly added by one-hot encoding
for i in data.columns:
    if i not in data2.columns:
        list_final.append(i)
for i in list_final:
    data[i] = data[i].astype(int) # bool -> 0/1
# Term: binary 0/1 mapping, then rename the column to match its new meaning
term_mapping = {
    'Short Term': 0,
    'Long Term': 1
}
data['Term'] = data['Term'].map(term_mapping)
data.rename(columns={'Term': 'Long Term'}, inplace=True)
continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist() # numeric column names
# Fill missing values in numeric columns with each column's mode
# (the original comment said "median", but the code actually computes the mode)
for feature in continuous_features:
    mode_value = data[feature].mode()[0] # most frequent value of the column
    # Assign back instead of fillna(..., inplace=True) on a column selection:
    # that pattern is deprecated chained assignment in pandas >= 2.1 and can
    # silently fail to modify the frame under copy-on-write
    data[feature] = data[feature].fillna(mode_value)
# As noted earlier, many tuning utilities ship with (often mandatory)
# cross-validation, so we just do a single split here instead of fighting that.
from sklearn.model_selection import train_test_split
X = data.drop(['Credit Default'], axis=1) # features (axis=1 drops the label column)
y = data['Credit Default'] # binary target
# 70/30 train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Split the held-out 30% in half: validation set and final test set
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=42) # 50% validation, 50% test
# Min-max scaling: fit on the training set only, then apply to val/test
# to avoid leaking test-set statistics into training
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
# Convert to float32 tensors on the selected device; labels reshaped to (N, 1)
# to match the model's single sigmoid output
X_train = torch.tensor(X_train, dtype=torch.float32).to(device)
X_val = torch.tensor(X_val, dtype=torch.float32).to(device)
X_test = torch.tensor(X_test, dtype=torch.float32).to(device)
y_train = torch.tensor(y_train.values, dtype=torch.float32).view(-1, 1).to(device)
y_val = torch.tensor(y_val.values, dtype=torch.float32).view(-1, 1).to(device)
y_test = torch.tensor(y_test.values, dtype=torch.float32).view(-1, 1).to(device)
#定义神经网络模型
# 定义模型(简单的3层全连接网络,相当于“小炒锅”,足够炒信贷数据这个菜)
import torch.nn as nn
class CreditModel(nn.Module):
    """Simple 3-layer MLP for binary credit-default classification.

    Architecture: input_dim -> hidden1 -> hidden2 -> 1, with ReLU
    activations between layers and a final sigmoid so the output is a
    probability in [0, 1] (suitable for BCELoss).
    """

    def __init__(self, input_dim, hidden1=128, hidden2=64):
        """
        Args:
            input_dim: number of input features.
            hidden1: width of the first hidden layer (default 128,
                matching the original hard-coded value).
            hidden2: width of the second hidden layer (default 64).
        """
        super(CreditModel, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden1)  # input -> first hidden layer
        self.fc2 = nn.Linear(hidden1, hidden2)    # first -> second hidden layer
        self.fc3 = nn.Linear(hidden2, 1)          # single logit for binary output
        self.relu = nn.ReLU()                     # non-linearity between layers
        self.sigmoid = nn.Sigmoid()               # squash logit to a probability

    def forward(self, x):
        """Return per-sample default probabilities with shape (N, 1)."""
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.sigmoid(self.fc3(x))
        return x
# Instantiate the model and move it to the selected device
model = CreditModel(X_train.shape[1]).to(device)
# Loss and optimizer
criterion = nn.BCELoss() # binary cross-entropy, matches the model's sigmoid output
optimizer = torch.optim.Adam(model.parameters(), lr=0.001) # Adam optimizer
# Build mini-batch data loaders
from torch.utils.data import TensorDataset, DataLoader
batch_size = 32
train_dataset = TensorDataset(X_train, y_train)
val_dataset = TensorDataset(X_val, y_val)
test_dataset = TensorDataset(X_test, y_test)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) # shuffle only the training set
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Confirm the input dimensionality after preprocessing
input_dim = X_train.shape[1]
print(f"模型输入维度:{input_dim}")
# Author: @浙大疏锦行