#116 - [Simulation] Shuffle them up

This post presents an algorithm for counting how many times two strings must be shuffled under a specific rule before they match a target string. The algorithm uses an auxiliary set to record the states encountered along the way, avoiding repeated work and reliably detecting when the target can never be reached.


Description

You are given two strings s1 and s2, each of length len, followed by a string s12 of length len*2.
Transform s1 and s2 into s12 according to the rule below, and report the number of transformations required.
The transformation rule is as follows (a small sketch follows this list):
Suppose s1 = 12345 and s2 = 67890.
After one shuffle, the interleaved sequence is s = 6172839405, i.e., s alternately takes one character from s2 and then one from s1.
If s is exactly equal to s12, output the number of shuffles performed.
If it is not, the first half of s becomes the new s1, the second half becomes the new s2, and the process above is repeated.
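
To make the rule concrete, here is a minimal sketch of a single shuffle step (an illustration of the rule above, not part of the original statement):

#include <iostream>
#include <string>

using namespace std;

int main()
{
	string s1 = "12345", s2 = "67890";
	string s(s1.size() * 2, ' ');
	for (size_t i = 0; i < s1.size(); ++i)
	{
		s[i + i] = s2[i];     // even positions come from s2
		s[i + i + 1] = s1[i]; // odd positions come from s1
	}
	cout << s << endl; // prints 6172839405
	return 0;
}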

 

Input

The first line contains T (1 ≤ T ≤ 1000), the number of test cases. Each test case first gives len (1 ≤ len ≤ 100), then the two strings s1 and s2 of length len, and finally the string s12 of length len*2.

Output

For each test case, first output the case number (numbering starts at 1),

then output the number of shuffles, followed by a newline.

Note that the two numbers are separated by a space.

For the number of shuffles: if s12 is obtained without performing any shuffle, output 0; if s12 can never be obtained no matter how many shuffles are performed, output -1.

Sample Input

2
4
AHAH
HAHA
HHAAAAHH
3
CDE
CDE
EEDDCC

Sample Output

1 2
2 -1

 

Simulate directly. Each state has exactly one successor, so returning to a previously seen state means the process has entered a cycle that will never produce s12; a set of visited states detects this.

#include <cstdio>   // for scanf / printf
#include <iostream>
#include <string>
#include <set>

using namespace std;

string s1, s2, s3, temp;
set<string> s;

int main(int argc, char** argv)
{
	int t, n, res, _case = 0, i;
	
	scanf("%d", &t);
	while (t--)
	{
		res = 0;
		scanf("%d", &n);
		cin >> s1 >> s2 >> s3;
		printf("%d ", ++_case);
		if (s1 + s2 == s3)
		{
			printf("0\n");
			continue;
		}
		temp = "";
		for (i = 1; i <= n + n; ++i)
		{
			temp += ' '; // pre-size temp to 2*n so the indexed writes below stay in bounds
		}
		s.clear();
		s.insert(s1 + s2);
		for ( ; ; ) // simulate: one shuffle per iteration
		{
			for (i = 0; i < n; ++i)
			{
				temp[i+i] = s2[i];
				temp[i+i+1] = s1[i];
			}
			++res;
			if (temp == s3)
			{
				printf("%d\n", res);
				break;
			}
			if (s.count(temp)) // revisiting a previous state means we are in a cycle and s3 will never appear
			{
				printf("-1\n");
				break;
			}
			s.insert(temp);
			s1 = temp.substr(0, n);
			s2 = temp.substr(n, n);
		}
	}
	
	return 0;
}
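
Since the shuffle only permutes character positions, it is a bijection on strings of length 2*len, so iterating it from the initial s1+s2 walks a pure cycle that eventually returns to the start. A hedged alternative sketch that relies on this fact instead of a set (the helpers shuffle_once and solve are illustrative names, not from the code above):

#include <iostream>
#include <string>

using namespace std;

// One shuffle: result[2i] = b[i], result[2i+1] = a[i]
string shuffle_once(const string &a, const string &b)
{
	string r(a.size() * 2, ' ');
	for (size_t i = 0; i < a.size(); ++i)
	{
		r[i + i] = b[i];
		r[i + i + 1] = a[i];
	}
	return r;
}

// Number of shuffles needed to turn a+b into target, or -1 if impossible.
int solve(const string &a, const string &b, const string &target)
{
	const string start = a + b;
	if (start == target)
		return 0;
	string cur = start;
	int steps = 0;
	const size_t n = a.size();
	do
	{
		cur = shuffle_once(cur.substr(0, n), cur.substr(n, n));
		++steps;
		if (cur == target)
			return steps;
	} while (cur != start); // back at the start: the cycle never produces the target
	return -1;
}

int main()
{
	cout << solve("AHAH", "HAHA", "HHAAAAHH") << endl; // sample case 1: prints 2
	cout << solve("CDE", "CDE", "EEDDCC") << endl;     // sample case 2: prints -1
	return 0;
}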

 
