Fixing "LibclangError: [Error 193] %1 is not a valid Win32 application. To provide a path to libclang use Config.set_library_path() or Config.set_library_file()"

This article describes how to resolve a LibclangError encountered on 64-bit Windows 7: use the 32-bit versions of Python, pyyaml, and pyCheetah. It also notes where to find the tolua download links and how to obtain the latest bindings-generator.

Original post: http://blog.youkuaiyun.com/langzi7758521/article/details/50955889

Problem: on Windows 7 64-bit and above, the following error is raised:

    LibclangError: [Error 193] %1 is not a valid Win32 application.
    To provide a path to libclang use Config.set_library_path() or Config.set_library_file()

Solution: use the 32-bit versions of Python, pyyaml, and pyCheetah.

(PS: the download links are listed in README.mdown under the tolua directory.)
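To confirm which architecture a given interpreter actually is, you can run a quick check with the same python.exe that bindings-generator is invoked with (a minimal sketch, not from the original post; a 4-byte pointer size means 32-bit, 8 bytes means 64-bit):

    # Minimal sketch: report whether the running Python interpreter is 32-bit or 64-bit.
    # Works on both Python 2 and Python 3; run it with the interpreter used by bindings-generator.
    import platform
    import struct

    bits = struct.calcsize("P") * 8  # pointer size in bits: 32 or 64
    print("Python %s (%d-bit)" % (platform.python_version(), bits))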
If switching to 32-bit versions still does not fix it, download the latest bindings-generator from GitHub.

(PS: download address: https://github.com/guojian822/bindings-generator)
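Separately, the error message itself points to a workaround: the clang Python bindings can be told explicitly which libclang DLL to load, before any index is created. Below is a minimal sketch (not from the original post); the DLL path is a hypothetical placeholder and must point to a libclang.dll whose bitness matches your Python interpreter:

    # Minimal sketch: explicitly tell the clang Python bindings which libclang to load.
    # The path below is a hypothetical example -- replace it with the libclang.dll that
    # ships with your bindings-generator checkout, matching your Python's bitness.
    from clang.cindex import Config, Index

    Config.set_library_file(r"C:\path\to\libclang.dll")  # must be called before Index.create()

    index = Index.create()  # still raises LibclangError if the DLL cannot be loaded
    print("libclang loaded successfully")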
