Random numbers (expand a random range from 1–5 to 1–7)

本文介绍了一个利用已有的1到5范围内的随机数生成器来创建1到7范围内随机数的方法。通过构建一个5x5的矩阵,每次调用两次1到5的随机数生成器来获取矩阵中的值,直到返回一个1到7之间的有效随机数。

这道题目是面试 fuhu 时 online test 的三道题之一。实现其实很简单,最重要的是想到方法。

Given a function which produces a random integer in the range 1 to 5, write a function which produces a random integer in the range 1 to 7.

int random5();

/*
 * Return a uniform random integer in 1..7 using only random5().
 *
 * Two calls to random5() yield 25 equally likely outcomes. The first 21
 * are mapped evenly onto 1..7 (three outcomes per value) — exactly the
 * mapping of the original 5x5 lookup table — and the remaining 4 outcomes
 * are rejected, repeating the draw so the result stays uniform.
 */
int random7() {
	for (;;) {
		int hi = random5();
		int lo = random5();
		int k = 5 * (hi - 1) + (lo - 1);  /* uniform in 0..24 */
		if (k < 21) {
			return k % 7 + 1;         /* 0..20 -> 1..7, three each */
		}
		/* k in 21..24: reject and retry */
	}
}


    import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler import tensorflow from sklearn.model_selection import train_test_split from tensorflow.keras.utils import to_categorical import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.optimizers import SGD from tensorflow.keras.layers import Dense, Dropout from tensorflow.keras.layers import Embedding from tensorflow.keras.layers import LSTM tf.keras.backend.clear_session() from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay from sklearn import datasets, tree, linear_model, svm from sklearn.metrics import confusion_matrix, classification_report from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.metrics import confusion_matrix, classification_report from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.metrics import confusion_matrix import seaborn as sns import tensorflow as tf print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) data = pd.read_csv("./emotions.csv") # Seprarting Positive,Neagtive and Neutral dataframes for plortting pos = data.loc[data["label"] == "POSITIVE"] sample_pos = pos.loc[2, 'fft_0_b':'fft_749_b'] neg = data.loc[data["label"] == "NEGATIVE"] sample_neg = neg.loc[0, 'fft_0_b':'fft_749_b'] neu = data.loc[data["label"] == "NEUTRAL"] sample_neu = neu.loc[1, 'fft_0_b':'fft_749_b'] #plottintg Dataframe distribution plt.figure(figsize=(25,7)) plt.title("Data distribution of Emotions") plt.style.use('fivethirtyeight') sns.countplot(x='label', data=data) plt.show() #Plotting Positive DataFrame plt.figure(figsize=(16, 10)) plt.plot(range(len(sample_pos)), sample_pos) plt.title("Graph of Positive Columns") plt.show() '''As we can noticed the most of the Negative Signals 
are from greater than 600 to and less than than -600''' #Plotting Negative DataFrame plt.figure(figsize=(16, 10)) plt.plot(range(len(sample_neg)), sample_neg) plt.title("Graph of Negative Columns") plt.show() #Plotting Neutral DataFrame plt.figure(figsize=(16, 10)) plt.plot(range(len(sample_neu)), sample_neu) plt.title("Graph of Neutral Columns") plt.show() def Transform_data(data): #Encoding Lables into numbers encoding_data = ({'NEUTRAL': 0, 'POSITIVE': 1, 'NEGATIVE': 2} ) data_encoded = data.replace(encoding_data) #getting brain signals into x variable x=data_encoded.drop(["label"] ,axis=1) #getting labels into y variable y = data_encoded.loc[:,'label'].values scaler = StandardScaler() #scaling Brain Signals scaler.fit(x) X = scaler.transform(x) #One hot encoding Labels Y = to_categorical(y) return X,Y #Calling above function and splitting dataset into train and test X,Y = Transform_data(data) x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 4) if tf.config.list_physical_devices('GPU'): print('Using GPU') with tf.device('/GPU:0'): # 原代码中的模型创建、编译和训练部分 # ... 
# 示例:创建模型 def create_model(): # input layer of model for brain signals inputs = tf.keras.Input(shape=(x_train.shape[1],)) # Hidden Layer for Brain signal using LSTM(GRU) expand_dims = tf.expand_dims(inputs, axis=2) gru = tf.keras.layers.GRU(256, return_sequences=True)(expand_dims) # Flatten Gru layer into vector form (one Dimensional array) flatten = tf.keras.layers.Flatten()(gru) # output latyer of Model outputs = tf.keras.layers.Dense(3, activation='softmax')(flatten) model = tf.keras.Model(inputs=inputs, outputs=outputs) print(model.summary()) return model # cretaing model lstmmodel = create_model() # Compiling model lstmmodel.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) # Training and Evaluting model history = lstmmodel.fit(x_train, y_train, epochs = 10, validation_split=0.1) loss, acc = lstmmodel.evaluate(x_test, y_test) # Loss and Accuracy of model on Testiong Dataset print(f"Loss on testing: {loss*100}",f"\nAccuracy on Training: {acc*100}") # predicting model on test set for plotting Confusion Matrix pred = lstmmodel.predict(x_test) else: print('Using CPU') # Creation of Function of Confusion Matrix def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(data.label.unique())) plt.xticks(tick_marks, names, rotation=90) plt.yticks(tick_marks, names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # after getting prediction checking maximum score prediction to claim which emotion this brain signal belongs to pred1 = np.argmax(pred, axis=1) # inversing the one hot encoding y_test1 = np.argmax(y_test, axis=1) # printing first 10 Actual and predicted outputs of Test brain signals print("Predicted: ", pred1[:10]) print("\n") print("Actual: ", y_test1[:10]) # Plotting Confusion matrix of Lstm Model cm = confusion_matrix(y_test1, pred1) np.set_printoptions(precision=2) 
print('Confusion matrix, without normalization') print(cm) plt.rcParams["figure.figsize"] = (20, 5) plt.figure() plot_confusion_matrix(cm, ["Neutral", "Positive", "Negative"])运行后出现Traceback (most recent call last): File "C:\Users\86139\Desktop\pythonProject1\main.py", line 147, in <module> pred1 = np.argmax(pred, axis=1) ^^^^ NameError: name 'pred' is not defined Using CPU,我需要解决问题并使用GPU
    11-19
    # NOTE(review): collapsed paste from a forum post. It interleaves, on
    # shared physical lines: (1) two Python classes — IntegerEdgeCases, a
    # mutator that generates integer edge cases distributed on bell curves
    # centered on each edge, and EdgeCaseGenerator, which precomputes the
    # edges/stddevs/weights it samples from; (2) the poster's request in
    # Chinese; and (3) an INCOMPLETE C++ port (StringLengthEdgeCase — the
    # paste is cut off mid-definition at "void StringLengthEdgeCase:").
    # Because two languages and prose share lines and the C++ is truncated,
    # the text below is left byte-identical; reformat before any use.
    class IntegerEdgeCases(Mutator): """ Generate integer edge cases. The numbers produced are distributed over a bell curve with the edge case as the center. """ def __init__(self, obj, rand): Mutator.__init__(self) min_, max_ = self.get_limits(obj) delta = max_ - min_ if 0xff >= delta >= 0: # We are <= a single byte, set the space size of the range self.space = delta + 1 self.full_space = self.space def sequential(): return min_ + self.mutation_index def random(): return rand.randint(min_, max_) self.sequential_generator = sequential self.random_generator = random else: # For more than a single byte, use edge case generator self.generator = EdgeCaseGenerator(min_, max_) self.space = len(self.generator.values) self.full_space = self.generator.deviation def sequential(): return self.generator.values[self.mutation_index] def random(): return self.generator.gen_next(rand) self.sequential_generator = sequential self.random_generator = random def get_limits(self, obj): """ Get the minimum and maximum values to generate edge cases for. :param obj: The element this mutator is bound to. :return: The minimum value of the number space, The maximum value of the number space. """ return 0, sys.maxsize def perform_mutation(self, obj, value): """ Mutate the data element. :param obj: The element to mutate. :param value: The value to use when mutating. :return: """ pass def mutate(self, obj, gen): value = gen() self.perform_mutation(obj, value) def sequential_mutation(self, obj): self.mutate(obj, self.sequential_generator) def random_mutation(self, obj): self.mutate(obj, self.random_generator) def get_count(self): return self.space class EdgeCaseGenerator: """ Computes the edge cases of numbers inside a given space. 
""" SCHAR_MIN = -128 SCHAR_MAX = 127 UCHAR_MAX = 255 CHAR_MIN = -128 CHAR_MAX = 127 SHRT_MIN = -32768 SHRT_MAX = 32767 USHRT_MAX = 65535 INT_MIN = -2147483647 - 1 INT_MAX = 2147483647 UINT_MAX = 4294967295 LONG_MIN = -2147483647 - 1 LONG_MAX = 2147483647 ULONG_MAX = 4294967295 LLONG_MIN = -9223372036854775807 - 1 LLONG_MAX = 9223372036854775807 ULLONG_MAX = 18446744073709551615 EDGES = [SCHAR_MIN, SCHAR_MAX, UCHAR_MAX, CHAR_MIN, CHAR_MAX, SHRT_MIN, SHRT_MAX, USHRT_MAX, INT_MIN, INT_MAX, UINT_MAX, LONG_MIN, LONG_MAX, ULONG_MAX, LLONG_MIN, LLONG_MAX, ULLONG_MAX, 0] # The maximum range to pick numbers around each edge max_range = 0x4000 def __init__(self, min_num, max_num): # The list of edge cases in the number space. self.edges = self.EDGES self.min_num = min_num self.max_num = max_num self.edges = [num for num in self.edges if max_num >= num >= min_num] self.edges.append(min_num) self.edges.append(max_num) self.edges = list(set(self.edges)) self.edges.sort() self.ranges = [] self.stddev = [] self.weights = [] # The sum of all the standard deviations at each edge case self.deviation = 0 # Precomputed list of values for sequential mutator self.values = [] # Compute range and standard deviation for i in range(len(self.edges)): edge = self.edges[i] if edge <= 0 and edge != max_num: # If edge <= 0, it is the distance to next edge v = self.edges[i + 1] - edge else: # If edge > 0, it is the distance to the previous edge v = edge - self.edges[i - 1] # Cap the +/- range to a maximum v = min(v, self.max_range) self.ranges.append(v) # We want the distribution to be a bell curve over # 2x the range. 
This means stddev should be 2*range / 6 # since 99% of the numbers will be 3 stddev away from the mean # Also, round up so if v is < 3, we get at least 1 sigma = float(v + 2) / 3 self.stddev.append(sigma) # The 1st and last edge get 1/2 the weight since they are half curves # We still store the full deviation but do an abs() of the next rng if i == 0 or i == len(self.edges) - 1: # Round up so we always have a valid stddev sigma = (sigma + 1) / 2 self.deviation += sigma # Weight each edge based on its range sum_ = 0 for i in range(len(self.edges)): weight = 1.0 - (1.0 * sum_ / self.deviation) self.weights.append(weight) s = self.stddev[i] # The 1st and last edge get 1/2 the weight since they are half curves # We still store the full deviation but do an abs() of the next rng if i == 0 or i == len(self.edges) - 1: s /= 2 sum_ += s # Compute a list of +/- delta around each edge for i in range(len(self.edges)): if i != 0: for j in range(-50, 0): self.values.append(self.edges[i] + j) self.values.append(self.edges[i]) if i != len(self.edges) - 1: for j in range(1, 51): self.values.append(self.edges[i] + j) def range(self, edge_index): return self.ranges[edge_index] def gen_next(self, rand): r = rand.random() i = len(self.weights) - 1 while self.weights[i] <= r: i -= 1 edge = self.edges[i] sigma = self.stddev[i] while True: num = rand.gauss(0, 1) if (num < 0 and edge == self.min_num) or (num > 0 and edge == self.max_num): num *= -1 num = num * sigma if edge == self.min_num: num = math.floor(num) elif edge == self.max_num: num = math.ceil(num) else: num = round(num) ret = edge + int(num) if self.max_num >= ret >= self.min_num: return int(ret)这是python版本代码,我要转为cpp版本,下面是已经完成的部分#ifndef MUTATOR_STRINGLENGTHEDGECASE_H #define MUTATOR_STRINGLENGTHEDGECASE_H #include <vector> #include <memory> #include "../utils.h" #include "../../data_types/string_.h" #include "../base.h" class StringLengthEdgeCase : public IntegerEdgeCases { public: 
StringLengthEdgeCase(std::shared_ptr<StringBase> obj, std::mt19937 &rand); std::pair<int64_t, int64_t> get_limits(std::shared_ptr<StringBase> obj) override; std::vector<std::shared_ptr<StringBase>> mutated_elements; void perform_mutation(std::shared_ptr<StringBase> obj, std::string value) override; void sequential_mutation(std::shared_ptr<StringBase> obj) override; void random_muatation(std::shared_ptr<StringBase> obj) override; bool supported(std::shared_ptr<StringBase> obj) override; private: }; #endif //MUTATOR_STRINGLENGTHEDGECASE_H #include "StringLengthEdgeCase.h" #include <algorithm> #include "../utils.h" StringLengthEdgeCase::StringLengthEdgeCase(std::shared_ptr<StringBase> obj, std::mt19937 &rand) : IntegerEdgeCases(obj, rand) {} std::pair<int64_t, int64_t> StringLengthEdgeCase::get_limits(std::shared_ptr<StringBase> obj) { int64_t max_ = obj->mutator_config.max_output_size * 8; int64_t size = obj->calculate_bit_size(); int64_t limit = std::max(max_, size); limit = (limit + 7) / 8; max_ = limit; if (obj->max_length != -INT64_MAX) { max_ = std::min(max_, obj->max_length); } int64_t min_ = 0; if (obj->min_length != INT64_MAX) { min_ = std::max(min_, obj->min_length); } return {min_, max_}; } bool StringLengthEdgeCase::supported(std::shared_ptr<StringBase> obj) { return std::dynamic_pointer_cast<String>(obj) != nullptr; } void StringLengthEdgeCase::perform_mutation(std::shared_ptr<StringBase> obj, std::string value) { uint64_t valueLength = value.size(); obj->mutated_value = expand(obj->value, valueLength); obj->mutated = true; mutated_elements.push_back(obj); } void StringLengthEdgeCase: 使用的基类我已经完成,需要在当前代码中针对StringLengthEdgeCase重写一下sequential_mutation和random_muatation对面
    07-02
    下载方式:https://pan.quark.cn/s/c9b9b647468b ### 初级JSP程序设计教程核心内容解析#### 一、JSP基础概述JSP(JavaServer Pages)是由Sun Microsystems公司创建的一种动态网页技术规范,主要应用于构建动态网站及Web应用。JSP技术使得开发者能够将动态数据与静态HTML文档整合,从而实现网页内容的灵活性和可变性。##### JSP的显著特性:1. **动态与静态内容的分离**:JSP技术支持将动态数据(例如数据库查询结果、实时时间等)嵌入到静态HTML文档中。这种设计方法增强了网页的适应性和可维护性。2. **易用性**:开发者可以利用常规的HTML编辑工具来编写静态部分,并通过简化的标签技术将动态内容集成到页面中。3. **跨平台兼容性**:基于Java平台的JSP具有优良的跨操作系统运行能力,能够在多种不同的系统环境中稳定工作。4. **强大的后台支持**:JSP能够通过JavaBean组件访问后端数据库及其他资源,以实现复杂的数据处理逻辑。5. **执行效率高**:JSP页面在初次被请求时会被转换为Servlet,随后的请求可以直接执行编译后的Servlet代码,从而提升了服务响应的效率。#### 二、JSP指令的运用JSP指令用于设定整个JSP页面的行为规范。这些指令通常放置在页面的顶部,向JSP容器提供处理页面的相关指导信息。##### 主要的指令类型:1. **Page指令**: - **语法结构**:`<%@ page attribute="value" %>` - **功能**:定义整个JSP页面的运行特性,如设定页面编码格式、错误处理机制等。 - **实例**: ...
    评论
    成就一亿技术人!
    拼手气红包6.0元
    还能输入1000个字符
     
    红包 添加红包
    表情包 插入表情
     条评论被折叠 查看
    添加红包

    请填写红包祝福语或标题

    红包个数最小为10个

    红包金额最低5元

    当前余额3.43前往充值 >
    需支付:10.00
    成就一亿技术人!
    领取后你会自动成为博主和红包主的粉丝 规则
    hope_wisdom
    发出的红包
    实付
    使用余额支付
    点击重新获取
    扫码支付
    钱包余额 0

    抵扣说明:

    1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
    2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

    余额充值