写在前面:尝试了一下,4090应该是秒挂,需要两张卡。
用CDS API下载ERA5数据
配置cds api
在ERA5官网右上角创建账户并登录,在首页点击数据集
搜索一个数据集,勾选想下载的部分,在API requests中点击documentation page,进入API配置环节。
在Setup the CDS API personal access token部分,可以查看自己的url与key,这是用来登录的许可证,ubuntu用户需要创建一个新的环境文件.cdsapirc,复制url与key并存入环境文件中。
安装cdsapi
打开虚拟环境,运行:
pip install cdsapi
下载数据
新建python文件,保存并运行:
import cdsapi

# Dataset and request follow the CDS API docs for ERA5 pressure-level data.
dataset = "reanalysis-era5-pressure-levels"
request = {
    'product_type': ['reanalysis'],
    # The CDS API rejects requests that omit the mandatory selection keys
    # below — adjust variable / pressure_level / date / time to the subset
    # you ticked on the dataset's download page.
    'variable': ['geopotential'],
    'pressure_level': ['500'],
    'year': ['2024'],
    'month': ['01'],
    'day': ['01'],
    'time': ['00:00'],
    'data_format': 'grib',
    'download_format': 'unarchived'
}

# Credentials (url + key) are read from the ~/.cdsapirc environment file.
client = cdsapi.Client()
# Submit the request and download the result into the current directory.
client.retrieve(dataset, request).download()
过程中可能会出现环境文件的报错,只需要在报错中提示的目录下重建环境文件.cdsapirc即可。
nc文件转npy文件
调用xarray读取nc数据,再用np.concatenate函数在valid_time(axis=0)维度上拼接,得到三维的surface数据和四维的upper_air数据。
import xarray as xr
import numpy as np
import os

target_dir = "target_data"

# --- Surface variables ---
# target_surface.npy is expected to hold the 4 surface variables
# (MSLP, U10, V10, T2M, in that exact order), i.e. shape (4, 721, 1440).
# NOTE(review): concatenating along axis=0 (valid_time) only produces a
# leading axis of 4 when each variable carries a single time step — confirm
# this holds for the downloaded files.
ds_surface = xr.open_dataset(os.path.join(target_dir, "target_surface.nc"))
print(ds_surface)
surface_names = ["msl", "u10", "v10", "t2m"]
array_surface = np.concatenate(
    [ds_surface[name].values for name in surface_names], axis=0
)
print(f'array_surface.shape={array_surface.shape}')

# --- Upper-air variables ---
# target_upper.npy holds the 5 upper-air variables (Z, Q, T, U, V, in that
# exact order) over the 13 pressure levels sorted descending, i.e.
# 1000hPa, 925hPa, ..., 100hPa, 50hPa — shape (5, 13, 721, 1440).
ds_upper = xr.open_dataset(os.path.join(target_dir, "target_upper.nc")).sortby("pressure_level", ascending=False)
print(ds_upper)
upper_names = ["z", "q", "t", "u", "v"]
array_upper = np.concatenate(
    [ds_upper[name].values for name in upper_names], axis=0
)
print(f'array_upper.shape={array_upper.shape}')

# Persist both arrays next to the source .nc files.
np.save(os.path.join(target_dir, "target_surface.npy"), array_surface)
np.save(os.path.join(target_dir, "target_upper.npy"), array_upper)
盘古微调代码详解
CPU微调代码如下,来自:WeatherLearn/finetune/finetune_cpu.py at master · lizhuoq/WeatherLearn · GitHub
import onnx
from onnx2torch import convert
import numpy as np
import os
import torch
from torch import optim
from torch import nn
from tqdm import tqdm
import json
import argparse
from tools import adjust_learning_rate

# Command-line configuration for the finetuning run.
parser = argparse.ArgumentParser(description='finetune')
parser.add_argument('--learning_rate', type=float, default=1e-6, help='optimizer learning rate')
parser.add_argument('--lradj', type=str, default='cosine', help='adjust learning rate')
parser.add_argument('--train_epochs', type=int, default=100, help='train epochs')
args = parser.parse_args()
print(args)

# Load the pretrained Pangu-Weather ONNX graph and convert it to a torch module.
model_path = "pangu_weather_1.onnx"
onnx_model = onnx.load(model_path)
torch_model = convert(onnx_model, True)

input_data_dir = "input_data"
output_data_dir = "output_data"
target_data_dir = "target_data"

# Load the upper-air numpy arrays
# (renamed from `input` to avoid shadowing the builtin of the same name).
input_upper = np.load(os.path.join(input_data_dir, 'input_upper.npy')).astype(np.float32)
# Load the surface numpy arrays
input_surface = np.load(os.path.join(input_data_dir, 'input_surface.npy')).astype(np.float32)
target = np.load(os.path.join(target_data_dir, "target_upper.npy")).astype(np.float32)
target_surface = np.load(os.path.join(target_data_dir, 'target_surface.npy')).astype(np.float32)

# Convert every array to a tensor ONCE, before the loop — the original code
# rebuilt torch.tensor(input) / torch.tensor(input_surface) on every epoch.
input_upper = torch.tensor(input_upper)
input_surface = torch.tensor(input_surface)
target = torch.tensor(target)
target_surface = torch.tensor(target_surface)

model_optim = optim.Adam(torch_model.parameters(), lr=args.learning_rate)
criterion = nn.L1Loss()
torch_model.train()
train_loss = []
for epoch in tqdm(range(args.train_epochs)):
    # batch = 1, device = cpu
    model_optim.zero_grad()
    output, output_surface = torch_model(input=input_upper, input_surface=input_surface)
    # We use the MAE loss to train the model.
    # The weight of the surface loss is 0.25; different weights can be
    # applied to different fields if needed.
    loss = criterion(output, target) + criterion(output_surface, target_surface) * 0.25
    loss.backward()
    model_optim.step()
    train_loss.append(loss.item())
    adjust_learning_rate(model_optim, epoch + 1, args)
    print("Epoch: {0} | Train Loss: {1:.7f}".format(
        epoch + 1, loss.item()))

# Persist the per-epoch loss curve for later inspection.
with open("train_results.json", "w") as f:
    json.dump(train_loss, f)
采用
torch.tensor().to(device)
将数据和模型迁移到GPU,从而实现GPU加速微调,代码如下:
import onnx
from onnx2torch import convert
import numpy as np
import os
import torch
from torch import optim
from torch import nn
from tqdm import tqdm
import json
import argparse
from tools import adjust_learning_rate

# Parse the finetuning hyper-parameters from the command line.
parser = argparse.ArgumentParser(description='finetune')
parser.add_argument('--learning_rate', type=float, default=1e-6, help='optimizer learning rate')
parser.add_argument('--lradj', type=str, default='cosine', help='adjust learning rate')
parser.add_argument('--train_epochs', type=int, default=100, help='train epochs')
args = parser.parse_args()
print(args)

# Pick the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using device: {device}")

# Convert the pretrained Pangu-Weather ONNX graph to a torch module and
# place it on the chosen device.
model_path = "pangu_weather_1.onnx"
torch_model = convert(onnx.load(model_path), True).to(device)

input_data_dir = "input_data"
output_data_dir = "output_data"
target_data_dir = "target_data"


def _load_tensor(directory, filename):
    # Read one .npy file as float32 and move it onto the training device.
    arr = np.load(os.path.join(directory, filename)).astype(np.float32)
    return torch.tensor(arr).to(device)


# Inputs and finetuning targets, all resident on the device before training.
input = _load_tensor(input_data_dir, 'input_upper.npy')
input_surface = _load_tensor(input_data_dir, 'input_surface.npy')
target = _load_tensor(target_data_dir, "target_upper.npy")
target_surface = _load_tensor(target_data_dir, 'target_surface.npy')

model_optim = optim.Adam(torch_model.parameters(), lr=args.learning_rate)
criterion = nn.L1Loss()
torch_model.train()

train_loss = []
for epoch in tqdm(range(args.train_epochs)):
    model_optim.zero_grad()
    # Forward pass (batch size 1, everything already on the device).
    output, output_surface = torch_model(input=input, input_surface=input_surface)
    # MAE loss; the surface term is down-weighted by 0.25.
    loss = criterion(output, target) + criterion(output_surface, target_surface) * 0.25
    loss.backward()
    model_optim.step()
    train_loss.append(loss.item())
    adjust_learning_rate(model_optim, epoch + 1, args)
    print("Epoch: {0} | Train Loss: {1:.7f}".format(epoch + 1, loss.item()))

# Dump the loss history for later analysis.
with open("train_results.json", "w") as f:
    json.dump(train_loss, f)
参考WeatherLearn/finetune构建好环境、下载好数据之后运行上述代码,即可微调。