tf.stack, tf.argmax, SparseCategoricalCrossentropy: loss_object

This article takes a close look at three TensorFlow APIs: tf.stack, which stacks a list of tensors along a specified axis; tf.argmax, which returns the index of the largest value along an axis of a tensor; and the SparseCategoricalCrossentropy loss, a common choice for computing the loss when training classification models. Worked examples are included to help readers master how each of these key APIs is used.
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
  
  See also `tf.concat`, `tf.tile`, `tf.repeat`.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the `axis` dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`;

  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
  Etc.

  For example:

  >>> x = tf.constant([1, 4])
  >>> y = tf.constant([2, 5])
  >>> z = tf.constant([3, 6])
  >>> tf.stack([x, y, z])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)>
  >>> tf.stack([x, y, z], axis=1)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>

  This is the opposite of unstack.  The numpy equivalent is `np.stack`

  >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))
  True

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    axis: An `int`. The axis to stack along. Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    ValueError: If `axis` is out of the range `[-(R+1), R+1)`.
  """

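Since the docstring notes that `tf.stack` is the opposite of `tf.unstack`, a minimal round-trip sketch (assuming TensorFlow 2.x with eager execution) may help make the relationship concrete:

# Minimal sketch, assuming TensorFlow 2.x eager execution:
# stack two rank-1 tensors, then recover them with tf.unstack.
import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])

stacked = tf.stack([x, y], axis=0)      # shape (2, 2)
restored = tf.unstack(stacked, axis=0)  # a Python list of two rank-1 tensors

print(stacked.shape)                           # (2, 2)
print([t.numpy().tolist() for t in restored])  # [[1, 4], [2, 5]]

Stacking with `axis=1` instead would interleave the elements, as in the `tf.stack([x, y, z], axis=1)` example above.
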
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  Note that in case of ties the identity of the return value is not guaranteed.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """

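As a small illustration of how `tf.argmax` is typically used alongside a classification loss, the sketch below (TensorFlow 2.x assumed; the score values are made up for illustration) turns per-class scores into predicted class ids:

# Minimal sketch, TensorFlow 2.x assumed; the scores are invented for illustration.
import tensorflow as tf

scores = tf.constant([[0.1, 2.3, -1.0],
                      [1.5, 0.2,  0.3]])

# Reduce across the class axis (axis=1) to get one class id per row.
pred_classes = tf.argmax(scores, axis=1, output_type=tf.int32)
print(pred_classes)  # tf.Tensor([1 0], shape=(2,), dtype=int32)
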
SparseCategoricalCrossentropy: loss_object(y_true, y_pred, sample_weight=None)

Computes the crossentropy loss between the labels and predictions.

Use this crossentropy loss function when there are two or more label classes. Labels are expected to be provided as integers. If you want to provide labels using a one-hot representation, use the CategoricalCrossentropy loss instead. There should be # classes floating point values per feature for y_pred and a single floating point value per feature for y_true.

In the snippet below, there is a single floating point value per example for y_true and # classes floating point values per example for y_pred. The shape of y_true is [batch_size] and the shape of y_pred is [batch_size, num_classes].

Usage:

y_true = [1, 2]
y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
# Using 'auto'/'sum_over_batch_size' reduction type.
scce = tf.keras.losses.SparseCategoricalCrossentropy()
scce(y_true, y_pred).numpy()
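For reference, the value returned by the snippet above is the batch mean of the per-example negative log probabilities. A minimal sketch (TensorFlow 2.x assumed) that checks this by hand and also exercises the sample_weight argument from the signature above:

# Minimal sketch, TensorFlow 2.x assumed: verify the loss value by hand.
import numpy as np
import tensorflow as tf

y_true = [1, 2]
y_pred = [[0.05, 0.95, 0.0], [0.1, 0.8, 0.1]]

scce = tf.keras.losses.SparseCategoricalCrossentropy()
print(scce(y_true, y_pred).numpy())  # ~1.177

# Each example contributes -log(p[true_class]); the default reduction
# ('sum_over_batch_size') averages over the batch.
manual = np.mean([-np.log(0.95), -np.log(0.1)])
print(manual)  # ~1.177

# sample_weight scales each example's loss before averaging over the batch.
print(scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy())  # ~0.814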