1085. Perfect Sequence (25)

Problem link: 1085. Perfect Sequence (25)

Given N positive integers and a parameter p, a sequence is "perfect" if its maximum M and minimum m satisfy M ≤ m × p. After sorting the array, fix each a[i] as the minimum and binary-search for the position of the first element strictly greater than a[i] * p; every element before that position can join the sequence, so the candidate answer is that position minus i. Overall complexity is O(n log n).

#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
const int maxn = 100100;

ll n, p, a[maxn];

int main() {
    scanf("%lld%lld", &n, &p);
    for (int i = 0; i < n; i++) scanf("%lld", &a[i]);
    sort(a, a + n);
    ll ans = 1;
    for (int i = 0; i < n; i++) {
        // Treat a[i] as the minimum; the maximum may then be at most t = a[i] * p.
        // long long is required here: a[i] and p can each be up to 1e9.
        ll t = a[i] * p;
        // Position of the first element >= t + 1, i.e. the first element strictly greater than t.
        ll weizhi = lower_bound(a, a + n, t + 1) - a;
        ans = max(ans, weizhi - i);
    }
    cout << ans << endl;
    return 0;
}
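Because the array is sorted, both window endpoints only ever move forward, so the same answer can also be computed with two pointers in O(n) after the sort. Below is a minimal sketch of that alternative, assuming (as the problem guarantees) that p ≥ 1 and that all products fit in a long long:

#include<bits/stdc++.h>
using namespace std;
typedef long long ll;

int main() {
    ll n, p;
    scanf("%lld%lld", &n, &p);
    vector<ll> a(n);
    for (auto &x : a) scanf("%lld", &x);
    sort(a.begin(), a.end());
    ll ans = 1;
    // Window [j, i]: a[j] is the current minimum, a[i] the current maximum.
    // Since p >= 1, a[i] <= a[i] * p, so the window never empties (j <= i).
    for (ll i = 0, j = 0; i < n; i++) {
        while (a[i] > a[j] * p) j++;   // shrink from the left until the window is perfect
        ans = max(ans, i - j + 1);
    }
    printf("%lld\n", ans);
    return 0;
}

Each pointer advances at most n times in total, so the scan itself is linear; the sort still dominates at O(n log n).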
