Error Message in Python 3: 'float' object cannot be interpreted as an integer

This article covers the difference in behavior of the division operator / between Python 2 and Python 3: in Python 2, integer division discards the fractional part, while in Python 3 it produces a float. It also shows how to convert the result when an integer is required.


In Python 2, the result of / between two ints is an int:

>>> 0/200 + 1 
1 
>>> 1/200 + 1 
1

In Python 3, the result of / is a float:

>>> 0/200 + 1 
1.0 
>>> 1/200 + 1 
1.005

In Python 2, / keeps only the integer part of the quotient and drops the fraction, so the result is an int. In Python 3, / performs true division, so the result is a float. When that float is then passed somewhere an integer is required, Python raises the error message: 'float' object cannot be interpreted as an integer. A minimal reproduction is shown below.
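The most common place to hit this is a function that demands an integer argument, such as range(). A minimal sketch in Python 3 (the variable n is just illustrative):

>>> n = 10
>>> for i in range(n / 2):  # n / 2 is 5.0 in Python 3, a float
...     print(i)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: 'float' object cannot be interpreted as an integer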

Solution: explicitly convert the float to an int.
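Continuing the illustrative range() example above, either wrap the division in int(), or use the floor-division operator //, which returns an int when both operands are ints:

>>> int(n / 2)          # explicit cast back to int
5
>>> n // 2              # floor division, result stays an int
5
>>> list(range(n // 2)) # now range() accepts it
[0, 1, 2, 3, 4]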
