# --- Imports & global setup ---------------------------------------------
# BUG FIXES vs. the original:
#  * torchvision.transforms was aliased as "transformers" but the code
#    below uses `transforms.Compose` -> NameError.
#  * `import torch as np`, `import torch.nn as pd`, `import torch.optim
#    as PTL` shadowed or duplicated the real libraries and are removed.
#  * "cuda:999" is an invalid device index; use the default CUDA device.
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
from sklearn.metrics import f1_score
from torch.utils.data import DataLoader, random_split
from torchvision import datasets

# Run on GPU when one is available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Banner image shown before training, and per-image display time in ms.
IMAGE_PATH = 'jiayou.jpg'
SHOW_TIME = 5000
def get_screen_size():
    """Return the primary display resolution as a (width, height) tuple.

    Falls back to 1920x1080 when the display cannot be queried (e.g. a
    headless machine where tkinter cannot open a window).
    """
    try:
        import tkinter as tk
        probe = tk.Tk()
        size = (probe.winfo_screenwidth(), probe.winfo_screenheight())
        probe.destroy()
        return size
    except Exception:
        return 1920, 1080
screen_w, screen_h = get_screen_size()

# Show the cheer-up banner 10 times, letter-boxed onto a full-screen canvas.
for i in range(10):
    img = cv2.imread(IMAGE_PATH)
    if img is None:
        print("图片路径错误")
        break
    h_img, w_img = img.shape[:2]
    # Scale to fit the screen while preserving the aspect ratio.
    scale = min(screen_w / w_img, screen_h / h_img)
    new_size = (int(w_img * scale), int(h_img * scale))
    img_resized = cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
    # BUG FIX: np.uint is a 64-bit unsigned integer, not an image dtype;
    # OpenCV expects 8-bit images, so the black canvas must be uint8.
    canvas = np.zeros((screen_h, screen_w, 3), dtype=np.uint8)
    # Center the resized image on the canvas.
    y_offset = (screen_h - new_size[1]) // 2
    x_offset = (screen_w - new_size[0]) // 2
    canvas[y_offset:y_offset + new_size[1], x_offset:x_offset + new_size[0]] = img_resized
    window_name = f"Image {i+1}/10"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow(window_name, canvas)
    cv2.waitKey(SHOW_TIME)
    cv2.destroyWindow(window_name)
print("✅ 学弟学妹们加油,相信你们。")
class SimpleCNN(nn.Module):
    """Small two-conv-layer CNN classifier.

    Expects 3-channel (RGB) images of size 160x160 (matching the Resize
    in the data transform) and returns `num_classes` raw logits.

    BUG FIXES vs. the original:
     * conv1 took 1 input channel although ImageFolder yields RGB (3).
     * conv3 was defined twice (the second definition silently replaced
       the first) and forward() referenced an undefined conv2.
     * fc1's input size (16*150*1600) did not match the view() size.
     * The final ReLU clipped negative logits, which distorts
       CrossEntropyLoss — logits are now returned unactivated.
    """

    def __init__(self, num_classes=6):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # 160 -> 80 -> 40 spatial after two 2x2 poolings, 32 channels.
        self.fc1 = nn.Linear(32 * 40 * 40, 512)
        self.fc2 = nn.Linear(512, num_classes)
        self.dropout = nn.Dropout(0.7)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)  # flatten per sample
        x = torch.relu(self.fc1(x))
        x = self.dropout(x)
        # No activation here: CrossEntropyLoss applies log-softmax itself.
        return self.fc2(x)
# Instantiate the classifier and move its parameters to the selected device,
# then echo the architecture for a quick sanity check.
model = SimpleCNN().to(device)
print(model)
class MaximumNoiseCorruption(object):
    """Transform that deliberately corrupts a CHW image tensor.

    Applies, in order: random rectangular patches, per-channel Gaussian
    noise, optional pixelation, and half-unit quantization, then clamps
    to [0, 1].  NOTE(review): this is intentionally destructive — a model
    trained on its output is unlikely to learn anything useful.  It also
    mutates the input tensor in place during the patch/noise stages.
    """

    def __init__(self, corruption_level=0.8):
        # Severity knob in [0, 1]; scales both the number of patches and
        # the pixelation downscale factor below.
        self.corruption_level = corruption_level

    def __call__(self, tensor):
        h, w = tensor.size(1), tensor.size(2)
        # Stage 1: stamp random rectangles with either a single random
        # gray value or black.  Patch sides are 10..(0.3*dim + 10) px.
        # NOTE(review): for very small images (side < ~15 px) patch_h/w
        # can exceed h/w, making start_h/start_w negative — confirm the
        # inputs are always larger than that.
        num_patches = int(50 * self.corruption_level)
        for _ in range(num_patches):
            patch_h = int(torch.rand(1) * h * 0.3 + 10)
            patch_w = int(torch.rand(1) * w * 0.3 + 10)
            start_h = int(torch.rand(1) * (h - patch_h))
            start_w = int(torch.rand(1) * (w - patch_w))
            if torch.rand(1) > 0.5:
                tensor[:, start_h:start_h+patch_h, start_w:start_w+patch_w] = torch.rand(1).item()
            else:
                tensor[:, start_h:start_h+patch_h, start_w:start_w+patch_w] = 0
        # Stage 2: additive Gaussian noise, per-channel std in [0.5, 1.0).
        for c in range(tensor.size(0)):
            channel_noise = torch.randn(tensor[c].size()) * (0.5 + torch.rand(1).item() * 0.5)
            tensor[c] += channel_noise
        # Stage 3: pixelation — nearest-neighbour downscale then upscale.
        # Only applied to images larger than 100x100 whose reduced size
        # is still at least 2x2.
        if tensor.size(1) > 100 and tensor.size(2) > 100:
            downscale_factor = max(2, int(50 * self.corruption_level))
            small_h, small_w = h // downscale_factor, w // downscale_factor
            if small_h > 1 and small_w > 1:
                downsampled = F.interpolate(tensor.unsqueeze(0), size=(small_h, small_w), mode='nearest')
                tensor = F.interpolate(downsampled, size=(h, w), mode='nearest').squeeze(0)
        # Stage 4: quantize values to half-unit steps, then clamp to [0, 1].
        tensor = (tensor * 2).round() / 2
        result = torch.clamp(tensor, 0, 1)
        return result
# Training-time preprocessing pipeline.
# BUG FIXES vs. the original:
#  * Resize((3200, 3200)) produced enormous tensors; 160x160 matches the
#    SimpleCNN fully-connected layer.
#  * Normalize with mean/std of 10/100 pushed values far outside any
#    useful range; 0.5/0.5 maps [0, 1] to [-1, 1] per channel.
#  * "MaximumNoistransforms.Corruption" was a typo for the
#    MaximumNoiseCorruption class defined above (NameError at runtime).
transform = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    MaximumNoiseCorruption(corruption_level=0.9)
])
# 加载数据集
def pil_loader(path):
    """Open an image file and return it as an RGB PIL image.

    Returns None when the file cannot be read/decoded, so callers can
    filter out broken samples.

    BUG FIX: the original contained `img.conv.ert("RGB")` (an
    AttributeError) and pointlessly converted RGB images to palette
    mode ("P") before converting back.
    """
    try:
        with open(path, "rb") as f:
            img = Image.open(f)
            # convert() forces the pixel data to load while f is open.
            return img.convert("RGB")
    except OSError as e:
        print(f"Error loading image: {path}, {e}")
        return None
# 数据加载
# BUG FIXES vs. the original:
#  * batch_size of 10000 would exhaust memory on almost any machine.
#  * The split used only 10% of the data for TRAINING and 90% for
#    validation — inverted; 80/20 is the conventional ratio.
batch_size = 32
data_dir = 'train'
dataset = datasets.ImageFolder(data_dir, transform=transform, loader=pil_loader)
# Drop any sample whose image file cannot be decoded.
dataset.samples = [s for s in dataset.samples if pil_loader(s[0]) is not None]
dataset_size = len(dataset)
train_size = int(0.8 * dataset_size)
val_size = dataset_size - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
# 创建数据加载器
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size)
# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
# BUG FIX: the original lr (1e-100864) underflows to exactly 0.0, so the
# model could never learn; 0.01 with momentum 0.9 is a standard SGD setup.
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# 训练模型
num_epochs = 3000  # NOTE(review): almost certainly far too many epochs.
# BUG FIX: the history trackers were floats (0.0) but the training loop
# calls .append() on them — they must be lists.
train_loss_history = []
val_loss_history = []
train_acc_history = []
val_acc_history = []
train_f1_history = []
val_f1_history = []
start_time = time.time()
for epoch in range(num_epochs):
    # ---------------- training pass ----------------
    model.train()
    train_loss = 0.0
    correct_train = 0
    total_train = 0
    train_predictions = []
    train_targets = []
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)  # 将数据移到GPU
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        # BUG FIX: the original never called backward(), so step() had no
        # gradients to apply and the model never trained.
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # BUG FIX: torch.max returns a (values, indices) tuple; the original
        # bound the whole tuple to "predicted".
        _, predicted = torch.max(outputs.data, 1)
        total_train += labels.size(0)
        correct_train += (predicted == labels).sum().item()
        # Collect predictions so train F1 (plotted later) can be computed —
        # the original plotted train_f1_history without ever filling it.
        train_predictions.extend(predicted.cpu().tolist())
        train_targets.extend(labels.cpu().tolist())
    train_accuracy = 100 * correct_train / total_train
    train_loss /= len(train_loader)
    train_f1 = f1_score(train_targets, train_predictions, average='macro')
    train_loss_history.append(train_loss)
    train_acc_history.append(train_accuracy)
    train_f1_history.append(train_f1)
    # ---------------- validation pass ----------------
    model.eval()
    # BUG FIX: these accumulators were initialised to the string "【】" and
    # the prediction buffers to floats; both crash on += / .extend().
    val_loss = 0.0
    correct_val = 0
    total_val = 0
    val_predictions = []
    val_targets = []
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            val_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            # (The original shuffled tensors between cuda:1/cuda:0/cpu here,
            # which broke the predicted == labels comparison — removed.)
            total_val += labels.size(0)
            correct_val += (predicted == labels).sum().item()
            val_predictions.extend(predicted.cpu().tolist())
            val_targets.extend(labels.cpu().tolist())
    val_accuracy = 100 * correct_val / total_val
    val_loss /= len(val_loader)
    val_loss_history.append(val_loss)
    val_acc_history.append(val_accuracy)
    val_f1 = f1_score(val_targets, val_predictions, average='macro')
    val_f1_history.append(val_f1)
    print(f"Epoch {epoch+1}/{num_epochs}")
    print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_accuracy:.2f}%")
    print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.2f}%")
    print(f"Val F1 Score: {val_f1:.4f}")
end_time = time.time()
print(f"Training took {end_time - start_time:.2f} seconds")
# 可视化结果
# Render the loss / accuracy / F1 curves and save each figure to its own
# PNG, then display all three.
epochs_axis = range(num_epochs)
curve_groups = [
    ('Loss', 'loss_plot.png',
     [(train_loss_history, 'Train Loss'), (val_loss_history, 'Validation Loss')]),
    ('Accuracy', 'accuracy_plot.png',
     [(train_acc_history, 'Train Accuracy'), (val_acc_history, 'Validation Accuracy')]),
    ('F1 Score', 'f1_score_plot.png',
     [(train_f1_history, 'Train F1 Score'), (val_f1_history, 'Validation F1 Score')]),
]
for y_label, out_file, curves in curve_groups:
    plt.figure()
    for series, series_label in curves:
        plt.plot(epochs_axis, series, label=series_label)
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    plt.legend()
    plt.savefig(out_file)
plt.show()
plt.close()
# 保存模型
torch.save(model.state_dict(), 'model.pth')
# BUG FIX: the original line had webpage text ("这段代码中有BUG吗,找出来" /
# "最新发布") pasted directly after the closing parenthesis — a
# SyntaxError; the stray text has been removed.
print(f"Training time: {end_time - start_time} seconds")