from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
def load_models_tokenizer(checkpoint_path=''):
    """Load a causal-LM checkpoint and its tokenizer for re-export.

    Args:
        checkpoint_path: Local directory (or hub id) of the checkpoint.
            Defaults to '' — fill in the real path before running.

    Returns:
        ``(model, tokenizer)`` tuple; the model is placed on CPU with
        float16 weights (no GPU needed for a format conversion).
    """
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_path,
        trust_remote_code=True,
    )
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        trust_remote_code=True,
        # Fix: `torch_device` is not a from_pretrained kwarg and is
        # silently forwarded to the config; `device_map` is the
        # supported way to pin the load target.
        device_map='cpu',
        torch_dtype=torch.float16,
    )
    return model, tokenizer
# Load the checkpoint once, then re-export both artifacts to disk
# (weights in safetensors format, tokenizer files alongside them).
model, tokenizer = load_models_tokenizer()
model.save_pretrained('', use_safetensors=True)
tokenizer.save_pretrained('')
# Converts a .bin checkpoint to safetensors, for "midie" inference
# (possibly MindIE — term garbled in the original; confirm).
# First published 2024-08-15 10:34:11.