1094 The Largest Generation (25 points)

This post walks through four ways to find a tree's widest level (the "largest generation") and the level number where it occurs: a breadth-first search (BFS), a depth-first search (DFS), and two variants. Comparing the implementations highlights the core ideas of level-aware tree traversal.

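All four programs read the same input: n, the number of nodes (node 1 being the root), and m, the number of non-leaf nodes, followed by m lines of the form "parent k child1 ... childk". The required output is the population of the largest generation and its level, counting the root as level 1. A small hand-made example (not the official PAT sample):

Input:
7 3
1 2 2 3
2 2 4 5
3 2 6 7

Output:
4 3

Level 3 holds the four grandchildren 4 5 6 7, so the largest generation has 4 members on level 3.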

Method 1

Adapted from the 六度空间 ("Six Degrees of Separation") problem; see that problem's write-up for the original idea. The BFS below delimits levels with a last/tail pair: last is the final node of the current level and tail is the last node enqueued so far, so when the popped node equals last, the current level has just finished.
Note: n == 1 needs special handling here. Each comparison measures the children just enqueued (end - start), so level 1 itself, which holds only the root, never enters the comparison; a lone root would fall through and print "0 0". Methods 2 and 3 below do not need this. Method 4 shows a simpler way to cut the queue into levels.

#include<cstdio>
#include<queue>
#include<vector>
using namespace std;
const int maxn = 105;
int n,total=0,layer=0;
vector<int> T[maxn];

void BFS(int x){
	queue<int> q;
	q.push(x);
	// end counts every node enqueued so far; start is its value when the
	// previous level closed, so end - start is the size of the level being
	// filled. last marks the final node of the current level, tail the
	// final node enqueued so far.
	int end=1,start=1,last=x,tail,Layer=1;
	while(!q.empty()){
		int temp = q.front();
		q.pop();
		for(int i=0;i<T[temp].size();i++){
			q.push(T[temp][i]);
			tail = T[temp][i];
			end++;
		}
		if(temp == last){          // the current level just finished
			last = tail;           // its children end at tail
			Layer++;
			if(end-start > total){ // end - start = size of the next level
				total = end-start;
				layer = Layer;
			}
			start=end;
		}
	}
}

int main(){
	int m,dad,k,son;
	scanf("%d%d",&n,&m);
	while(m--){   // build the tree
		scanf("%d%d",&dad,&k);
		for(int i=0;i<k;i++){
			scanf("%d",&son);
			T[dad].push_back(son);
		}
	}
	if(n==1){     // lone root: level 1 holds the only node
		printf("1 1\n");
		return 0;
	}
	BFS(1);
	printf("%d %d\n",total,layer);
	return 0;
}
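One possible way to drop the n == 1 special case (a sketch, not the author's version): seed the answer with level 1 before searching, since level 1 always holds exactly one node, the root:

int n, total = 1, layer = 1;  // level 1 always contains exactly the root
...
BFS(1);  // the strict > comparison then keeps level 1 on ties,
         // so a single-node tree still prints "1 1"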

Method 2

Depth-first search (DFS): recurse down the tree carrying the current level and count every visited node into a per-level array.
The tree is stored in a simple static form: a vector of children per node.

#include<cstdio>
#include<cstring>
#include<vector>
using namespace std;
const int maxn = 105;
int n;
vector<int> T[maxn];
int hashT[maxn];


void DFS(int index, int level){
	hashT[level]++;  // one more node on this level
	for(int i=0;i<T[index].size();i++){
		DFS(T[index][i], level + 1);
	}
}

int main(){
	int m,dad,k,son;
	scanf("%d%d",&n,&m);
	while(m--){   // build the tree
		scanf("%d%d",&dad,&k);
		for(int i=0;i<k;i++){
			scanf("%d",&son);
			T[dad].push_back(son);
		}
	}
	memset(hashT,0,sizeof(hashT));
	DFS(1,1);
	int maxValue=-1,layer=1;
	for(int i=1;i<maxn;i++){   // most populated level; strict > keeps the earliest on ties
		if(hashT[i] > maxValue){
			maxValue = hashT[i];
			layer = i;
		}
	}
	printf("%d %d\n",maxValue,layer);
	return 0;
}
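For reference, a minimal iterative sketch of the same per-level counting, using an explicit stack of (node, level) pairs instead of recursion (not required here, since n <= 100 keeps the recursion shallow; the cnt name is mine):

#include<cstdio>
#include<stack>
#include<utility>
#include<vector>
using namespace std;
const int maxn = 105;
vector<int> T[maxn];
int cnt[maxn];   // cnt[level] = number of nodes on that level

int main(){
	int n,m,dad,k,son;
	scanf("%d%d",&n,&m);
	while(m--){   // build the tree, exactly as above
		scanf("%d%d",&dad,&k);
		for(int i=0;i<k;i++){
			scanf("%d",&son);
			T[dad].push_back(son);
		}
	}
	stack<pair<int,int> > st;   // (node, its level)
	st.push(make_pair(1,1));
	while(!st.empty()){
		pair<int,int> p = st.top();
		st.pop();
		cnt[p.second]++;         // count the node on its level
		for(int i=0;i<T[p.first].size();i++){
			st.push(make_pair(T[p.first][i], p.second+1));
		}
	}
	int maxValue=-1,layer=1;
	for(int i=1;i<maxn;i++){
		if(cnt[i] > maxValue){
			maxValue = cnt[i];
			layer = i;
		}
	}
	printf("%d %d\n",maxValue,layer);
	return 0;
}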

Method 3

Breadth-first search (BFS), with the tree in another static form: each node is a struct holding its layer number alongside its child list, and a child's layer is its parent's layer plus one.

#include<cstdio>
#include<queue>
#include<cstring>
#include<vector>
using namespace std;
const int maxn = 105;
int n;
int hashT[maxn];

struct node{
	int layer;
	vector<int> child;
}T[maxn];


void BFS(int index,int level){
	hashT[level]++;  // count the root on its level
	T[index].layer=level;
	queue<int> q;
	q.push(index);
	while(!q.empty()){
		int temp = q.front();
		q.pop();
		for(int i=0;i<T[temp].child.size();i++){
			int child=T[temp].child[i];
			T[child].layer = T[temp].layer + 1;  // one level below the parent
			hashT[T[child].layer]++;
			q.push(child);
		}
	}
}

int main(){
	int m,dad,k,son;
	scanf("%d%d",&n,&m);
	while(m--){   // build the tree
		scanf("%d%d",&dad,&k);
		for(int i=0;i<k;i++){
			scanf("%d",&son);
			T[dad].child.push_back(son);
		}
	}
	memset(hashT,0,sizeof(hashT));
	BFS(1,1);
	int maxValue=-1,layer=1;
	for(int i=1;i<maxn;i++){   // most populated level, earliest on ties
		if(hashT[i] > maxValue){
			maxValue = hashT[i];
			layer = i;
		}
	}
	printf("%d %d\n",maxValue,layer);
	return 0;
}
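The per-node layer field is one design choice; an alternative sketch (mine, not the author's) carries the level inside the queue entries instead, so the struct field becomes unnecessary. It drops into Method 3's program in place of BFS, given an extra #include<utility> for make_pair:

// Assumes Method 3's globals (T[], hashT[]); counting on pop gives the
// same per-level totals as counting on push above.
void BFS_pairs(int index, int level){
	queue<pair<int,int> > q;          // (node, its level)
	q.push(make_pair(index, level));
	while(!q.empty()){
		pair<int,int> p = q.front();
		q.pop();
		hashT[p.second]++;            // count the node on its level
		for(int i=0;i<T[p.first].child.size();i++){
			q.push(make_pair(T[p.first].child[i], p.second+1));
		}
	}
}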

Method 4

Level-order BFS with no extra bookkeeping: the queue is drained one whole level per outer iteration, so the queue size at the start of each iteration is exactly that level's width.

#include <cstdio>
#include <queue>
#include <vector>
using namespace std;
const int N = 105;
vector<int> G[N];
int opt = -1, level;
void bfs(){
	queue<int> q;
	q.push(1);
	int l = 1;                    // level of the nodes currently queued
	while(q.size()){
		int len = q.size();       // snapshot: width of the current level
		if(len > opt) {
			opt = len;
			level = l;
		}
		l++;
		for(int i = 0; i < len; i++){   // pop exactly this level,
			int t = q.front();          // pushing the next one behind it
			q.pop();
			for(int j = 0 ; j < G[t].size(); j++){
				q.push(G[t][j]);
			}
		}
	}
}
int main(){
	int n, m, k, x, u;
	scanf("%d%d", &n, &m);
	while(m--){               // build the tree
		scanf("%d %d", &u, &k);
		for(int i = 0; i < k; i++){
			scanf("%d", &x);
			G[u].push_back(x);
		}
	}
	bfs();
	printf("%d %d\n", opt, level); 
	return 0;
}
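All four programs should agree: fed the hand-made example from the top of this post, each prints

4 3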