Python BulkWriteError: batch op errors occurred

This post documents a bulk-write error hit while inserting into MongoDB from Python, along with the full error message and stack trace. The failure comes from a unique-index conflict during the batch insert, and two fixes are offered: clean up the duplicate data, or drop the unique index that causes the conflict.


I ran into the following error while calling insert_many:

File "C:/Users/Administrator/PycharmProjects/untitled/UrlLibTest.py", line 62, in <module>

    FundUtil.FundUtil.getAllFundOneDayData()
  File "C:\Users\Administrator\PycharmProjects\untitled\FundUtil.py", line 90, in getAllFundOneDayData
    MongoUtil.MongoUtil.doInsert(documents);
  File "C:\Users\Administrator\PycharmProjects\untitled\MongoUtil.py", line 13, in doInsert
    funDTable.insert_many(documents);
  File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\pymongo\collection.py", line 724, in insert_many
    blk.execute(self.write_concern.document)
  File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\pymongo\bulk.py", line 495, in execute
    return self.execute_legacy(sock_info, generator, write_concern)
  File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\pymongo\bulk.py", line 462, in execute_legacy
    raise BulkWriteError(full_result)

pymongo.errors.BulkWriteError: batch op errors occurre
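For context, this failure is easy to reproduce whenever the target collection has a unique index. A minimal sketch (the connection string, database, collection, and field names are all made up for illustration):

```python
from pymongo import MongoClient
from pymongo.errors import BulkWriteError

client = MongoClient("mongodb://localhost:27017")  # hypothetical local server
coll = client["test_db"]["funds"]                  # hypothetical db/collection

# With a unique index on "code", any repeated value in the batch fails.
coll.create_index("code", unique=True)

docs = [
    {"code": "000001", "name": "fund A"},
    {"code": "000001", "name": "fund B"},  # duplicate "code"
]

try:
    coll.insert_many(docs)
except BulkWriteError as e:
    print(e.details["writeErrors"])  # per-document error records
```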


Cause: one of the documents in the batch duplicates an existing value on a unique index, so the bulk insert fails.
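To confirm the diagnosis, catch the exception and inspect its details attribute; duplicate-key violations carry MongoDB error code 11000. A sketch, reusing the funDTable and documents names from the traceback above:

```python
from pymongo.errors import BulkWriteError

try:
    funDTable.insert_many(documents)
except BulkWriteError as e:
    for err in e.details.get("writeErrors", []):
        # code 11000 means a duplicate key on a unique index
        print(err["index"], err["code"], err["errmsg"])
```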

The fix: check the data being inserted and remove the duplicates, or drop the unique index that triggers the conflict. A few concrete options are sketched below.
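Three options, sketched under the assumption that the unique index is on a field named "code" (adjust to your schema):

```python
# Option 1: de-duplicate on the unique key before inserting.
seen = set()
unique_docs = []
for doc in documents:
    if doc["code"] not in seen:
        seen.add(doc["code"])
        unique_docs.append(doc)
funDTable.insert_many(unique_docs)

# Option 2: insert with ordered=False so the batch is not aborted at the
# first duplicate; the exception is still raised at the end, but every
# non-conflicting document gets written.
try:
    funDTable.insert_many(documents, ordered=False)
except BulkWriteError:
    pass  # only the duplicates were rejected

# Option 3: drop the offending unique index entirely.
# ("code_1" is a hypothetical index name; list the real ones with
# funDTable.index_information())
funDTable.drop_index("code_1")
```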
