Visualizing parser arguments: how to use _get_kwargs()

This post walks through an example of configuring command-line arguments with the argparse module, including key settings such as batch size, image size, and loss-function type with their default values, and then shows how to print every parsed argument with _get_kwargs().
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=1, help='batch size, default: 1')
parser.add_argument('--image_size', type=int, default=256, help='image size, default: 256')
# Caveat: type=bool is a common argparse pitfall -- any non-empty string passed on the
# command line (even "False") converts to True. It is harmless here because only the
# default value is used.
parser.add_argument('--use_lsgan', type=bool, default=True,
                    help='use lsgan (mean squared error) or cross entropy loss, default: True')
parser.add_argument('--norm', type=str, default='instance',
                    help='[instance, batch] use instance norm or batch norm, default: instance')
parser.add_argument('--lambda1', type=float, default=10.0)
parser.add_argument('--lambda2', type=float, default=10.0)
parser.add_argument('--learning_rate', type=float, default=2e-4)
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--pool_size', type=int, default=50)
parser.add_argument('--ngf', type=int, default=64,
                    help='number of gen filters in first conv layer, default: 64')
parser.add_argument('--x', type=str, default='datasets/man2woman/man.tfrecords')
parser.add_argument('--y', type=str, default='datasets/man2woman/woman.tfrecords')
parser.add_argument('--load_model', type=str, default='20170828-1941')

# parse_known_args() returns the parsed Namespace plus a list of any
# unrecognized command-line arguments.
FLAGS, unparsed = parser.parse_known_args()

# Namespace._get_kwargs() returns the parsed arguments as a list of
# (name, value) pairs sorted by argument name.
for k, v in FLAGS._get_kwargs():
    print(k, "=", v)

Running the script produces the following output:

batch_size = 1
beta1 = 0.5
image_size = 256
lambda1 = 10.0
lambda2 = 10.0
learning_rate = 0.0002
load_model = 20170828-1941
ngf = 64
norm = instance
pool_size = 50
use_lsgan = True
x = datasets/man2woman/man.tfrecords
y = datasets/man2woman/woman.tfrecords
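
Note that _get_kwargs() is technically a private helper of argparse.Namespace (in current CPython it simply returns sorted(self.__dict__.items())), so it could change between versions. As a minimal alternative sketch, the documented built-in vars() exposes the same information as a plain dict and, once sorted, prints the identical listing:

# vars() turns the parsed Namespace into a regular dict -- the documented way
# to inspect arguments, avoiding the private _get_kwargs() API.
for k, v in sorted(vars(FLAGS).items()):
    print(k, "=", v)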